linux/fs/exofs/inode.c
/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/slab.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };

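/* Maximum number of pages a single I/O may carry: bounded both by what
 * a kmalloc'ed page-pointer array can hold and by the layout's maximum
 * I/O length.
 */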
unsigned exofs_max_io_pages(struct ore_layout *layout,
                            unsigned expected_pages)
{
        unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

        /* TODO: easily support bio chaining */
        pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
        return pages;
}

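/* A page_collect gathers a run of contiguous pages destined for a
 * single I/O, together with the ore_io_state that will carry them.
 */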
struct page_collect {
        struct exofs_sb_info *sbi;
        struct inode *inode;
        unsigned expected_pages;
        struct ore_io_state *ios;

        struct page **pages;
        unsigned alloc_pages;
        unsigned nr_pages;
        unsigned long length;
        loff_t pg_first; /* keep 64bit also in 32-arches */
        bool read_4_write; /* This means two things: that the read is sync
                            * and that the pages should not be unlocked.
                            */
        struct page *that_locked_page;
};

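/* Initialize a page_collect for a new run of pages belonging to @inode. */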
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
                       struct inode *inode)
{
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

        pcol->sbi = sbi;
        pcol->inode = inode;
        pcol->expected_pages = expected_pages;

        pcol->ios = NULL;
        pcol->pages = NULL;
        pcol->alloc_pages = 0;
        pcol->nr_pages = 0;
        pcol->length = 0;
        pcol->pg_first = -1;
        pcol->read_4_write = false;
        pcol->that_locked_page = NULL;
}

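/* Ready the collect for the next segment; the previous pages array is
 * by now owned by the in-flight I/O (see read_exec/write_exec).
 */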
static void _pcol_reset(struct page_collect *pcol)
{
        pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

        pcol->pages = NULL;
        pcol->alloc_pages = 0;
        pcol->nr_pages = 0;
        pcol->length = 0;
        pcol->pg_first = -1;
        pcol->ios = NULL;
        pcol->that_locked_page = NULL;

        /* this is probably the end of the loop but in writes
         * it might not end here. don't be left with nothing
         */
        if (!pcol->expected_pages)
                pcol->expected_pages = MAX_PAGES_KMALLOC;
}

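/* Allocate the pages array, halving the request on each kmalloc
 * failure until something fits.
 */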
static int pcol_try_alloc(struct page_collect *pcol)
{
        unsigned pages;

        /* TODO: easily support bio chaining */
        pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

        for (; pages; pages >>= 1) {
                pcol->pages = kmalloc(pages * sizeof(struct page *),
                                      GFP_KERNEL);
                if (likely(pcol->pages)) {
                        pcol->alloc_pages = pages;
                        return 0;
                }
        }

        EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
                  pcol->expected_pages);
        return -ENOMEM;
}

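/* Release the pages array and any io_state still held. */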
static void pcol_free(struct page_collect *pcol)
{
        kfree(pcol->pages);
        pcol->pages = NULL;

        if (pcol->ios) {
                ore_put_io_state(pcol->ios);
                pcol->ios = NULL;
        }
}

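/* Append one page to the collect; -ENOMEM here just means "array full"
 * and tells the caller to submit and start a new segment.
 */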
static int pcol_add_page(struct page_collect *pcol, struct page *page,
                         unsigned len)
{
        if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
                return -ENOMEM;

        pcol->pages[pcol->nr_pages++] = page;
        pcol->length += len;
        return 0;
}

enum {PAGE_WAS_NOT_IN_IO = 17};
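/* Per-page fixup after a read. PAGE_WAS_NOT_IN_IO is a private
 * sentinel, not an errno: a page tagged with it was not part of the
 * I/O, so its state is left alone and the tag folds back to 0.
 */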
static int update_read_page(struct page *page, int ret)
{
        switch (ret) {
        case 0:
                /* Everything is OK */
                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);
                break;
        case -EFAULT:
                /* In this case we were trying to read something that wasn't on
                 * disk yet - return a page full of zeroes.  This should be OK,
                 * because the object should be empty (if there was a write
                 * before this read, the read would be waiting with the page
                 * locked */
                clear_highpage(page);

                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);
                EXOFS_DBGMSG("recovered read error\n");
                /* fall through */
        case PAGE_WAS_NOT_IN_IO:
                ret = 0; /* recovered error */
                break;
        default:
                SetPageError(page);
        }
        return ret;
}

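/* Per-page fixup after a write; a PAGE_WAS_NOT_IN_IO page was never
 * submitted, so neither its error state nor its writeback is touched.
 */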
static void update_write_page(struct page *page, int ret)
{
        if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
                return; /* don't pass start don't collect $200 */

        if (ret) {
                mapping_set_error(page->mapping, ret);
                SetPageError(page);
        }
        end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
        int i;
        u64 good_bytes;
        u64 length = 0;
        int ret = ore_check_io(pcol->ios, NULL);

        if (likely(!ret)) {
                good_bytes = pcol->length;
                ret = PAGE_WAS_NOT_IN_IO;
        } else {
                good_bytes = 0;
        }

        EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
                     " length=0x%lx nr_pages=%u\n",
                     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
                     pcol->nr_pages);

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];
                struct inode *inode = page->mapping->host;
                int page_stat;

                if (inode != pcol->inode)
                        continue; /* osd might add more pages at end */

                if (likely(length < good_bytes))
                        page_stat = 0;
                else
                        page_stat = ret;

                EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
                          inode->i_ino, page->index,
                          page_stat ? "bad_bytes" : "good_bytes");

                ret = update_read_page(page, page_stat);
                if (!pcol->read_4_write)
                        unlock_page(page);
                length += PAGE_SIZE;
        }

        pcol_free(pcol);
        EXOFS_DBGMSG2("readpages_done END\n");
        return ret;
}

/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
        struct page_collect *pcol = p;

        __readpages_done(pcol);
        atomic_dec(&pcol->sbi->s_curr_pending);
        kfree(pcol);
}

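/* Error path: update the status of, and unlock, every collected page. */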
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
        int i;

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];

                if (rw == READ)
                        update_read_page(page, ret);
                else
                        update_write_page(page, ret);

                unlock_page(page);
        }
}

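/* The ORE may trim an I/O to less than what was requested. Detach the
 * left-over pages from @pcol_src and seed @pcol with them, so they are
 * submitted as the beginning of the next I/O.
 */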
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
        struct page_collect *pcol_src, struct page_collect *pcol)
{
        /* length was wrong or offset was not page aligned */
        BUG_ON(pcol_src->nr_pages < ios->nr_pages);

        if (pcol_src->nr_pages > ios->nr_pages) {
                struct page **src_page;
                unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
                unsigned long len_less = pcol_src->length - ios->length;
                unsigned i;
                int ret;

                /* This IO was trimmed */
                pcol_src->nr_pages = ios->nr_pages;
                pcol_src->length = ios->length;

                /* Left over pages are passed to the next io */
                pcol->expected_pages += pages_less;
                pcol->nr_pages = pages_less;
                pcol->length = len_less;
                src_page = pcol_src->pages + pcol_src->nr_pages;
                pcol->pg_first = (*src_page)->index;

                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        return ret;

                for (i = 0; i < pages_less; ++i)
                        pcol->pages[i] = *src_page++;

                EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
                        "pages_less=0x%x expected_pages=0x%x "
                        "next_offset=0x%llx next_len=0x%lx\n",
                        pcol_src->nr_pages, pages_less, pcol->expected_pages,
                        pcol->pg_first * PAGE_SIZE, pcol->length);
        }
        return 0;
}

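/* Submit the collected pages as one read. In the read_4_write case the
 * read is executed synchronously; otherwise ownership of the pages
 * moves to a kmalloc'ed copy of the collect and completion is handled
 * by readpages_done().
 */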
static int read_exec(struct page_collect *pcol)
{
        struct exofs_i_info *oi = exofs_i(pcol->inode);
        struct ore_io_state *ios;
        struct page_collect *pcol_copy = NULL;
        int ret;

        if (!pcol->pages)
                return 0;

        if (!pcol->ios) {
                int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
                                             pcol->pg_first << PAGE_CACHE_SHIFT,
                                             pcol->length, &pcol->ios);

                if (ret)
                        return ret;
        }

        ios = pcol->ios;
        ios->pages = pcol->pages;

        if (pcol->read_4_write) {
                ore_read(pcol->ios);
                return __readpages_done(pcol);
        }

        pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
        if (!pcol_copy) {
                ret = -ENOMEM;
                goto err;
        }

        *pcol_copy = *pcol;
        ios->done = readpages_done;
        ios->private = pcol_copy;

        /* pages ownership was passed to pcol_copy */
        _pcol_reset(pcol);

        ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
        if (unlikely(ret))
                goto err;

        EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
                pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

        ret = ore_read(ios);
        if (unlikely(ret))
                goto err;

        atomic_inc(&pcol->sbi->s_curr_pending);

        return 0;

err:
        if (!pcol->read_4_write)
                _unlock_pcol_pages(pcol, ret, READ);

        pcol_free(pcol);

        kfree(pcol_copy);
        return ret;
}

/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous segment
 * and will start a new collection. Eventually the caller must submit the last
 * segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
        struct page_collect *pcol = data;
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        size_t len;
        int ret;

        /* FIXME: Just for debugging, will be removed */
        if (PageUptodate(page))
                EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
                          page->index);

        pcol->that_locked_page = page;

        if (page->index < end_index)
                len = PAGE_CACHE_SIZE;
        else if (page->index == end_index)
                len = i_size & ~PAGE_CACHE_MASK;
        else
                len = 0;

        if (!len || !obj_created(oi)) {
                /* this will be out of bounds, or doesn't exist yet.
                 * Current page is cleared and the request is split
                 */
                clear_highpage(page);

                SetPageUptodate(page);
                if (PageError(page))
                        ClearPageError(page);

                if (!pcol->read_4_write)
                        unlock_page(page);
                EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
                             "read_4_write=%d index=0x%lx end_index=0x%lx "
                             "splitting\n", inode->i_ino, len,
                             pcol->read_4_write, page->index, end_index);

                return read_exec(pcol);
        }

try_again:

        if (unlikely(pcol->pg_first == -1)) {
                pcol->pg_first = page->index;
        } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
                   page->index)) {
                /* Discontinuity detected, split the request */
                ret = read_exec(pcol);
                if (unlikely(ret))
                        goto fail;
                goto try_again;
        }

        if (!pcol->pages) {
                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        goto fail;
        }

        if (len != PAGE_CACHE_SIZE)
                zero_user(page, len, PAGE_CACHE_SIZE - len);

        EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
                     inode->i_ino, page->index, len);

        ret = pcol_add_page(pcol, page, len);
        if (ret) {
                EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
                          "this_len=0x%zx nr_pages=%u length=0x%lx\n",
                          page, len, pcol->nr_pages, pcol->length);

                /* split the request, and start again with current page */
                ret = read_exec(pcol);
                if (unlikely(ret))
                        goto fail;

                goto try_again;
        }

        return 0;

fail:
        /* SetPageError(page); ??? */
        unlock_page(page);
        return ret;
}

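/* ->readpages: read_exec() is deliberately called twice; the first
 * submission may be trimmed, leaving the tail pages in pcol for one
 * last I/O.
 */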
static int exofs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, nr_pages, mapping->host);

        ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
        if (ret) {
                EXOFS_ERR("read_cache_pages => %d\n", ret);
                return ret;
        }

        ret = read_exec(&pcol);
        if (unlikely(ret))
                return ret;

        return read_exec(&pcol);
}

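/* Read a single page. With read_4_write the read is synchronous and
 * the page is left locked, for use by write_begin.
 */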
static int _readpage(struct page *page, bool read_4_write)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, 1, page->mapping->host);

        pcol.read_4_write = read_4_write;
        ret = readpage_strip(&pcol, page);
        if (ret) {
                EXOFS_ERR("_readpage => %d\n", ret);
                return ret;
        }

        return read_exec(&pcol);
}

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
        return _readpage(page, false);
}

/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
        struct page_collect *pcol = p;
        int i;
        u64  good_bytes;
        u64  length = 0;
        int ret = ore_check_io(ios, NULL);

        atomic_dec(&pcol->sbi->s_curr_pending);

        if (likely(!ret)) {
                good_bytes = pcol->length;
                ret = PAGE_WAS_NOT_IN_IO;
        } else {
                good_bytes = 0;
        }

        EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
                     " length=0x%lx nr_pages=%u\n",
                     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
                     pcol->nr_pages);

        for (i = 0; i < pcol->nr_pages; i++) {
                struct page *page = pcol->pages[i];
                struct inode *inode = page->mapping->host;
                int page_stat;

                if (inode != pcol->inode)
                        continue; /* osd might add more pages to a bio */

                if (likely(length < good_bytes))
                        page_stat = 0;
                else
                        page_stat = ret;

                update_write_page(page, page_stat);
                unlock_page(page);
                EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
                             inode->i_ino, page->index, page_stat);

                length += PAGE_SIZE;
        }

        pcol_free(pcol);
        kfree(pcol);
        EXOFS_DBGMSG2("writepages_done END\n");
}

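/* "read-4-write" hook: hand the ORE the page-cache page backing
 * @offset, presumably so partial-stripe writes can read missing data
 * through the cache. that_locked_page is returned as-is since the
 * caller already holds it locked.
 */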
static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
{
        struct page_collect *pcol = priv;
        pgoff_t index = offset / PAGE_SIZE;

        if (!pcol->that_locked_page ||
            (pcol->that_locked_page->index != index)) {
                struct page *page = find_get_page(pcol->inode->i_mapping, index);

                if (!page) {
                        page = find_or_create_page(pcol->inode->i_mapping,
                                                   index, GFP_NOFS);
                        if (unlikely(!page)) {
                                EXOFS_DBGMSG("grab_cache_page Failed "
                                        "index=0x%llx\n", _LLU(index));
                                return NULL;
                        }
                        unlock_page(page);
                }
                if (PageDirty(page) || PageWriteback(page))
                        *uptodate = true;
                else
                        *uptodate = PageUptodate(page);
                EXOFS_DBGMSG("index=0x%lx uptodate=%d\n", index, *uptodate);
                return page;
        } else {
                EXOFS_DBGMSG("YES that_locked_page index=0x%lx\n",
                             pcol->that_locked_page->index);
                *uptodate = true;
                return pcol->that_locked_page;
        }
}

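/* Drop the reference taken by __r4w_get_page(); that_locked_page was
 * never referenced, so it is left alone.
 */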
static void __r4w_put_page(void *priv, struct page *page)
{
        struct page_collect *pcol = priv;

        if (pcol->that_locked_page != page) {
                EXOFS_DBGMSG("index=0x%lx\n", page->index);
                page_cache_release(page);
                return;
        }
        EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
}

static const struct _ore_r4w_op _r4w_op = {
        .get_page = &__r4w_get_page,
        .put_page = &__r4w_put_page,
};

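/* Submit the collected pages as one asynchronous write; ownership of
 * the pages moves to a kmalloc'ed copy of the collect and completion
 * is handled by writepages_done().
 */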
static int write_exec(struct page_collect *pcol)
{
        struct exofs_i_info *oi = exofs_i(pcol->inode);
        struct ore_io_state *ios;
        struct page_collect *pcol_copy = NULL;
        int ret;

        if (!pcol->pages)
                return 0;

        BUG_ON(pcol->ios);
        ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
                                 pcol->pg_first << PAGE_CACHE_SHIFT,
                                 pcol->length, &pcol->ios);
        if (unlikely(ret))
                goto err;

        pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
        if (!pcol_copy) {
                EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
                ret = -ENOMEM;
                goto err;
        }

        *pcol_copy = *pcol;

        ios = pcol->ios;
        ios->pages = pcol_copy->pages;
        ios->done = writepages_done;
        ios->r4w = &_r4w_op;
        ios->private = pcol_copy;

        /* pages ownership was passed to pcol_copy */
        _pcol_reset(pcol);

        ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
        if (unlikely(ret))
                goto err;

        EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
                pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

        ret = ore_write(ios);
        if (unlikely(ret)) {
                EXOFS_ERR("write_exec: ore_write() Failed\n");
                goto err;
        }

        atomic_inc(&pcol->sbi->s_curr_pending);
        return 0;

err:
        _unlock_pcol_pages(pcol, ret, WRITE);
        pcol_free(pcol);
        kfree(pcol_copy);

        return ret;
}

/* writepage_strip is called either directly from writepage() or by the VFS from
 * within write_cache_pages(), to add one more page to be written to storage.
 * It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually the caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
                           struct writeback_control *wbc_unused, void *data)
{
        struct page_collect *pcol = data;
        struct inode *inode = pcol->inode;
        struct exofs_i_info *oi = exofs_i(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        size_t len;
        int ret;

        BUG_ON(!PageLocked(page));

        ret = wait_obj_created(oi);
        if (unlikely(ret))
                goto fail;

        if (page->index < end_index)
                /* in this case, the page is within the limits of the file */
                len = PAGE_CACHE_SIZE;
        else {
                len = i_size & ~PAGE_CACHE_MASK;

                if (page->index > end_index || !len) {
                        /* in this case, the page is outside the limits
                         * (truncate in progress)
                         */
                        ret = write_exec(pcol);
                        if (unlikely(ret))
                                goto fail;
                        if (PageError(page))
                                ClearPageError(page);
                        unlock_page(page);
                        EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
                                     "outside the limits\n",
                                     inode->i_ino, page->index);
                        return 0;
                }
        }

try_again:

        if (unlikely(pcol->pg_first == -1)) {
                pcol->pg_first = page->index;
        } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
                   page->index)) {
                /* Discontinuity detected, split the request */
                ret = write_exec(pcol);
                if (unlikely(ret))
                        goto fail;

                EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
                             inode->i_ino, page->index);
                goto try_again;
        }

        if (!pcol->pages) {
                ret = pcol_try_alloc(pcol);
                if (unlikely(ret))
                        goto fail;
        }

        EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
                     inode->i_ino, page->index, len);

        ret = pcol_add_page(pcol, page, len);
        if (unlikely(ret)) {
                EXOFS_DBGMSG2("Failed pcol_add_page "
                             "nr_pages=%u total_length=0x%lx\n",
                             pcol->nr_pages, pcol->length);

                /* split the request, next loop will start again */
                ret = write_exec(pcol);
                if (unlikely(ret)) {
                        EXOFS_DBGMSG("write_exec failed => %d\n", ret);
                        goto fail;
                }

                goto try_again;
        }

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        return 0;

fail:
        EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
                     inode->i_ino, page->index, ret);
        set_bit(AS_EIO, &page->mapping->flags);
        unlock_page(page);
        return ret;
}

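/* ->writepages: for WB_SYNC_ALL the trimmed-off remainder is pumped
 * immediately by a second write_exec(); otherwise it is re-dirtied and
 * left for the next writeout pass.
 */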
static int exofs_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
{
        struct page_collect pcol;
        long start, end, expected_pages;
        int ret;

        start = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = (wbc->range_end == LLONG_MAX) ?
                        start + mapping->nrpages :
                        wbc->range_end >> PAGE_CACHE_SHIFT;

        if (start || end)
                expected_pages = end - start + 1;
        else
                expected_pages = mapping->nrpages;

        if (expected_pages < 32L)
                expected_pages = 32L;

        EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
                     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
                     mapping->host->i_ino, wbc->range_start, wbc->range_end,
                     mapping->nrpages, start, end, expected_pages);

        _pcol_init(&pcol, expected_pages, mapping->host);

        ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
        if (unlikely(ret)) {
                EXOFS_ERR("write_cache_pages => %d\n", ret);
                return ret;
        }

        ret = write_exec(&pcol);
        if (unlikely(ret))
                return ret;

        if (wbc->sync_mode == WB_SYNC_ALL) {
                return write_exec(&pcol); /* pump the last remainder */
        } else if (pcol.nr_pages) {
                /* not SYNC, let the remainder join the next writeout */
                unsigned i;

                for (i = 0; i < pcol.nr_pages; i++) {
                        struct page *page = pcol.pages[i];

                        end_page_writeback(page);
                        set_page_dirty(page);
                        unlock_page(page);
                }
        }
        return 0;
}

/*
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct page_collect pcol;
        int ret;

        _pcol_init(&pcol, 1, page->mapping->host);

        ret = writepage_strip(page, NULL, &pcol);
        if (ret) {
                EXOFS_ERR("exofs_writepage => %d\n", ret);
                return ret;
        }

        return write_exec(&pcol);
}
*/
/* i_mutex held using inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
        if (to > inode->i_size)
                truncate_pagecache(inode, to, inode->i_size);
}

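/* ->write_begin: a partial page that is not uptodate must be read in
 * first (read-modify-write), unless it lies wholly beyond i_size, in
 * which case zeroing it is enough.
 */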
int exofs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        int ret = 0;
        struct page *page;

        page = *pagep;
        if (page == NULL) {
                ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
                                         fsdata);
                if (ret) {
                        EXOFS_DBGMSG("simple_write_begin failed\n");
                        goto out;
                }

                page = *pagep;
        }

        /* read modify write */
        if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
                loff_t i_size = i_size_read(mapping->host);
                pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
                size_t rlen;

                if (page->index < end_index)
                        rlen = PAGE_CACHE_SIZE;
                else if (page->index == end_index)
                        rlen = i_size & ~PAGE_CACHE_MASK;
                else
                        rlen = 0;

                if (!rlen) {
                        clear_highpage(page);
                        SetPageUptodate(page);
                        goto out;
                }

                ret = _readpage(page, true);
                if (ret) {
                        /*SetPageError was done by _readpage. Is it ok?*/
                        unlock_page(page);
                        EXOFS_DBGMSG("_readpage failed\n");
                }
        }
out:
        if (unlikely(ret))
                _write_failed(mapping->host, pos + len);

        return ret;
}

static int exofs_write_begin_export(struct file *file,
                struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        *pagep = NULL;

        return exofs_write_begin(file, mapping, pos, len, flags, pagep,
                                        fsdata);
}

static int exofs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        /* According to comment in simple_write_end i_mutex is held */
        loff_t i_size = inode->i_size;
        int ret;

        ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (unlikely(ret))
                _write_failed(inode, pos + len);

        /* TODO: once simple_write_end marks inode dirty remove */
        if (i_size != inode->i_size)
                mark_inode_dirty(inode);
        return ret;
}

static int exofs_releasepage(struct page *page, gfp_t gfp)
{
        EXOFS_DBGMSG("page 0x%lx\n", page->index);
        WARN_ON(1);
        return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
        EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
        WARN_ON(1);
}

const struct address_space_operations exofs_aops = {
        .readpage       = exofs_readpage,
        .readpages      = exofs_readpages,
        .writepage      = NULL,
        .writepages     = exofs_writepages,
        .write_begin    = exofs_write_begin_export,
        .write_end      = exofs_write_end,
        .releasepage    = exofs_releasepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .invalidatepage = exofs_invalidatepage,

        /* Not implemented Yet */
        .bmap           = NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
        .direct_IO      = NULL, /* TODO: Should be trivial to do */

        /* With these NULL has special meaning or default is not exported */
        .get_xip_mem    = NULL,
        .migratepage    = NULL,
        .launder_page   = NULL,
        .is_partially_uptodate = NULL,
        .error_remove_page = NULL,
};


/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
        struct exofs_i_info *oi = exofs_i(inode);

        return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}

static int _do_truncate(struct inode *inode, loff_t newsize)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        int ret;

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;

        ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
        if (likely(!ret))
                truncate_setsize(inode, newsize);

        EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
                     inode->i_ino, newsize, ret);
        return ret;
}

/*
 * Set inode attributes - update size attribute on OSD if needed,
 *                        otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = dentry->d_inode;
        int error;

        /* if we are about to modify an object, and it hasn't been
         * created yet, wait
         */
        error = wait_obj_created(exofs_i(inode));
        if (unlikely(error))
                return error;

        error = inode_change_ok(inode, iattr);
        if (unlikely(error))
                return error;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                error = _do_truncate(inode, iattr->ia_size);
                if (unlikely(error))
                        return error;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);
        return 0;
}

static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
        EXOFS_APAGE_FS_DATA,
        EXOFS_ATTR_INODE_FILE_LAYOUT,
        0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
        EXOFS_APAGE_FS_DATA,
        EXOFS_ATTR_INODE_DIR_LAYOUT,
        0);

/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
                    struct exofs_fcb *inode)
{
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct osd_attr attrs[] = {
                [0] = g_attr_inode_data,
                [1] = g_attr_inode_file_layout,
                [2] = g_attr_inode_dir_layout,
        };
        struct ore_io_state *ios;
        struct exofs_on_disk_inode_layout *layout;
        int ret;

        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
                return ret;
        }

        attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
        attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);

        ios->in_attr = attrs;
        ios->in_attr_len = ARRAY_SIZE(attrs);

        ret = ore_read(ios);
        if (unlikely(ret)) {
                EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
                          _LLU(oi->one_comp.obj.id), ret);
                memset(inode, 0, sizeof(*inode));
                inode->i_mode = 0040000 | (0777 & ~022);
                /* If object is lost on target we might as well enable its
                 * delete.
                 */
                if ((ret == -ENOENT) || (ret == -EINVAL))
                        ret = 0;
                goto out;
        }

        ret = extract_attr_from_ios(ios, &attrs[0]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
                goto out;
        }
        WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
        memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

        ret = extract_attr_from_ios(ios, &attrs[1]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of inode_file_layout failed\n",
                          __func__);
                goto out;
        }
        if (attrs[1].len) {
                layout = attrs[1].val_ptr;
                if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
                        EXOFS_ERR("%s: unsupported files layout %d\n",
                                __func__, layout->gen_func);
                        ret = -ENOTSUPP;
                        goto out;
                }
        }

        ret = extract_attr_from_ios(ios, &attrs[2]);
        if (ret) {
                EXOFS_ERR("%s: extract_attr of inode_dir_layout failed\n",
                          __func__);
                goto out;
        }
        if (attrs[2].len) {
                layout = attrs[2].val_ptr;
                if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
                        EXOFS_ERR("%s: unsupported meta-data layout %d\n",
                                __func__, layout->gen_func);
                        ret = -ENOTSUPP;
                        goto out;
                }
        }

out:
        ore_put_io_state(ios);
        return ret;
}

static void __oi_init(struct exofs_i_info *oi)
{
        init_waitqueue_head(&oi->i_wq);
        oi->i_flags = 0;
}
/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
        struct exofs_i_info *oi;
        struct exofs_fcb fcb;
        struct inode *inode;
        int ret;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;
        oi = exofs_i(inode);
        __oi_init(oi);
        exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
                         exofs_oi_objno(oi));

        /* read the inode from the osd */
        ret = exofs_get_inode(sb, oi, &fcb);
        if (ret)
                goto bad_inode;

        set_obj_created(oi);

        /* copy stuff from on-disk struct to in-memory struct */
        inode->i_mode = le16_to_cpu(fcb.i_mode);
        inode->i_uid = le32_to_cpu(fcb.i_uid);
        inode->i_gid = le32_to_cpu(fcb.i_gid);
        set_nlink(inode, le16_to_cpu(fcb.i_links_count));
        inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
        inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
        inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
        inode->i_ctime.tv_nsec =
                inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
        oi->i_commit_size = le64_to_cpu(fcb.i_size);
        i_size_write(inode, oi->i_commit_size);
        inode->i_blkbits = EXOFS_BLKSHIFT;
        inode->i_generation = le32_to_cpu(fcb.i_generation);

        oi->i_dir_start_lookup = 0;

        if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
                ret = -ESTALE;
                goto bad_inode;
        }

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (fcb.i_data[0])
                        inode->i_rdev =
                                old_decode_dev(le32_to_cpu(fcb.i_data[0]));
                else
                        inode->i_rdev =
                                new_decode_dev(le32_to_cpu(fcb.i_data[1]));
        } else {
                memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
        }

        inode->i_mapping->backing_dev_info = sb->s_bdi;
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &exofs_file_inode_operations;
                inode->i_fop = &exofs_file_operations;
                inode->i_mapping->a_ops = &exofs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &exofs_dir_inode_operations;
                inode->i_fop = &exofs_dir_operations;
                inode->i_mapping->a_ops = &exofs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (exofs_inode_is_fast_symlink(inode))
                        inode->i_op = &exofs_fast_symlink_inode_operations;
                else {
                        inode->i_op = &exofs_symlink_inode_operations;
                        inode->i_mapping->a_ops = &exofs_aops;
                }
        } else {
                inode->i_op = &exofs_special_inode_operations;
                if (fcb.i_data[0])
                        init_special_inode(inode, inode->i_mode,
                           old_decode_dev(le32_to_cpu(fcb.i_data[0])));
                else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(fcb.i_data[1])));
        }

        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(ret);
}

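/* Sleep until create_done() marks the object as created; returns -EIO
 * if the inode went bad while waiting.
 */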
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
        if (!obj_created(oi)) {
                EXOFS_DBGMSG("!obj_created\n");
                BUG_ON(!obj_2bcreated(oi));
                wait_event(oi->i_wq, obj_created(oi));
                EXOFS_DBGMSG("wait_event done\n");
        }
        return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}

/*
 * Callback function from exofs_new_inode().  The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
        struct inode *inode = p;
        struct exofs_i_info *oi = exofs_i(inode);
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        int ret;

        ret = ore_check_io(ios, NULL);
        ore_put_io_state(ios);

        atomic_dec(&sbi->s_curr_pending);

        if (unlikely(ret)) {
                EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx\n",
                          _LLU(exofs_oi_objno(oi)),
                          _LLU(oi->one_comp.obj.partition));
                /*TODO: When the FS is corrupted, creation can fail because
                 * the object already exists. Get rid of this asynchronous
                 * creation; if the object exists, increment the obj counter
                 * and try the next object until we succeed. All these
                 * dangling objects will be made into lost files by
                 * chkfs.exofs
                 */
        }

        set_obj_created(oi);

        wake_up(&oi->i_wq);
}

/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
{
        struct super_block *sb = dir->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct inode *inode;
        struct exofs_i_info *oi;
        struct ore_io_state *ios;
        int ret;

        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        oi = exofs_i(inode);
        __oi_init(oi);

        set_obj_2bcreated(oi);

        inode->i_mapping->backing_dev_info = sb->s_bdi;
        inode_init_owner(inode, dir, mode);
        inode->i_ino = sbi->s_nextid++;
        inode->i_blkbits = EXOFS_BLKSHIFT;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        oi->i_commit_size = inode->i_size = 0;
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
        insert_inode_hash(inode);

        exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
                         exofs_oi_objno(oi));
        exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

        mark_inode_dirty(inode);

        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
                return ERR_PTR(ret);
        }

        ios->done = create_done;
        ios->private = inode;

        ret = ore_create(ios);
        if (ret) {
                ore_put_io_state(ios);
                return ERR_PTR(ret);
        }
        atomic_inc(&sbi->s_curr_pending);

        return inode;
}

/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
        struct exofs_sb_info    *sbi;
        struct exofs_fcb        fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
        struct updatei_args *args = p;

        ore_put_io_state(ios);

        atomic_dec(&args->sbi->s_curr_pending);

        kfree(args);
}

/*
 * Write the inode to the OSD.  Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct ore_io_state *ios;
        struct osd_attr attr;
        struct exofs_fcb *fcb;
        struct updatei_args *args;
        int ret;

        args = kzalloc(sizeof(*args), GFP_KERNEL);
        if (!args) {
                EXOFS_DBGMSG("Failed kzalloc of args\n");
                return -ENOMEM;
        }

        fcb = &args->fcb;

        fcb->i_mode = cpu_to_le16(inode->i_mode);
        fcb->i_uid = cpu_to_le32(inode->i_uid);
        fcb->i_gid = cpu_to_le32(inode->i_gid);
        fcb->i_links_count = cpu_to_le16(inode->i_nlink);
        fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
        oi->i_commit_size = i_size_read(inode);
        fcb->i_size = cpu_to_le64(oi->i_commit_size);
        fcb->i_generation = cpu_to_le32(inode->i_generation);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        fcb->i_data[0] =
                                cpu_to_le32(old_encode_dev(inode->i_rdev));
                        fcb->i_data[1] = 0;
                } else {
                        fcb->i_data[0] = 0;
                        fcb->i_data[1] =
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        fcb->i_data[2] = 0;
                }
        } else
                memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
                goto free_args;
        }

        attr = g_attr_inode_data;
        attr.val_ptr = fcb;
        ios->out_attr_len = 1;
        ios->out_attr = &attr;

        wait_obj_created(oi);

        if (!do_sync) {
                args->sbi = sbi;
                ios->done = updatei_done;
                ios->private = args;
        }

        ret = ore_write(ios);
        if (!do_sync && !ret) {
                atomic_inc(&sbi->s_curr_pending);
                goto out; /* deallocation in updatei_done */
        }

        ore_put_io_state(ios);
free_args:
        kfree(args);
out:
        EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
                     inode->i_ino, do_sync, ret);
        return ret;
}

int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
        return exofs_update_inode(inode, 1);
}

/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
        struct exofs_sb_info *sbi = p;

        ore_put_io_state(ios);

        atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero.  We remove the object
 * from the OSD here.  We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
        struct exofs_i_info *oi = exofs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct exofs_sb_info *sbi = sb->s_fs_info;
        struct ore_io_state *ios;
        int ret;

        truncate_inode_pages(&inode->i_data, 0);

        /* TODO: should do better here */
        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;

        inode->i_size = 0;
        clear_inode(inode);

        /* if we are deleting an obj that hasn't been created yet, wait.
         * This also makes sure that create_done cannot be called with an
         * already evicted inode.
         */
        wait_obj_created(oi);
        /* ignore the error, attempt a remove anyway */

        /* Now Remove the OSD objects */
        ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
        if (unlikely(ret)) {
                EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
                return;
        }

        ios->done = delete_done;
        ios->private = sbi;

        ret = ore_remove(ios);
        if (ret) {
                EXOFS_ERR("%s: ore_remove failed\n", __func__);
                ore_put_io_state(ios);
                return;
        }
        atomic_inc(&sbi->s_curr_pending);

        return;

no_delete:
        clear_inode(inode);
}