linux/fs/xfs/xfs_buf.c
   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include <linux/stddef.h>
  20#include <linux/errno.h>
  21#include <linux/gfp.h>
  22#include <linux/pagemap.h>
  23#include <linux/init.h>
  24#include <linux/vmalloc.h>
  25#include <linux/bio.h>
  26#include <linux/sysctl.h>
  27#include <linux/proc_fs.h>
  28#include <linux/workqueue.h>
  29#include <linux/percpu.h>
  30#include <linux/blkdev.h>
  31#include <linux/hash.h>
  32#include <linux/kthread.h>
  33#include <linux/migrate.h>
  34#include <linux/backing-dev.h>
  35#include <linux/freezer.h>
  36
  37#include "xfs_format.h"
  38#include "xfs_log_format.h"
  39#include "xfs_trans_resv.h"
  40#include "xfs_sb.h"
  41#include "xfs_mount.h"
  42#include "xfs_trace.h"
  43#include "xfs_log.h"
  44
  45static kmem_zone_t *xfs_buf_zone;
  46
  47#ifdef XFS_BUF_LOCK_TRACKING
  48# define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
  49# define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
  50# define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
  51#else
  52# define XB_SET_OWNER(bp)       do { } while (0)
  53# define XB_CLEAR_OWNER(bp)     do { } while (0)
  54# define XB_GET_OWNER(bp)       do { } while (0)
  55#endif
  56
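/*
 * GFP mask used for buffer page allocations: readahead allocations use
 * __GFP_NORETRY so they give up quickly under memory pressure (the caller
 * simply aborts the readahead), everything else uses GFP_NOFS to avoid
 * recursing into the filesystem from reclaim, and both variants add
 * __GFP_NOWARN to suppress allocation failure warnings.
 */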
  57#define xb_to_gfp(flags) \
  58        ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
  59
  60
  61static inline int
  62xfs_buf_is_vmapped(
  63        struct xfs_buf  *bp)
  64{
  65        /*
  66         * Return true if the buffer is vmapped.
  67         *
   68         * b_addr is null if the buffer is not mapped, but a single page
   69         * buffer is never vmapped (it uses the page address directly), so
   70         * the check has to be for both b_addr and bp->b_page_count > 1.
  71         */
  72        return bp->b_addr && bp->b_page_count > 1;
  73}
  74
  75static inline int
  76xfs_buf_vmap_len(
  77        struct xfs_buf  *bp)
  78{
  79        return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
  80}
  81
  82/*
  83 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  84 * b_lru_ref count so that the buffer is freed immediately when the buffer
  85 * reference count falls to zero. If the buffer is already on the LRU, we need
  86 * to remove the reference that LRU holds on the buffer.
  87 *
  88 * This prevents build-up of stale buffers on the LRU.
  89 */
  90void
  91xfs_buf_stale(
  92        struct xfs_buf  *bp)
  93{
  94        ASSERT(xfs_buf_islocked(bp));
  95
  96        bp->b_flags |= XBF_STALE;
  97
  98        /*
  99         * Clear the delwri status so that a delwri queue walker will not
 100         * flush this buffer to disk now that it is stale. The delwri queue has
 101         * a reference to the buffer, so this is safe to do.
 102         */
 103        bp->b_flags &= ~_XBF_DELWRI_Q;
 104
 105        spin_lock(&bp->b_lock);
 106        atomic_set(&bp->b_lru_ref, 0);
 107        if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 108            (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
 109                atomic_dec(&bp->b_hold);
 110
 111        ASSERT(atomic_read(&bp->b_hold) >= 1);
 112        spin_unlock(&bp->b_lock);
 113}
 114
 115static int
 116xfs_buf_get_maps(
 117        struct xfs_buf          *bp,
 118        int                     map_count)
 119{
 120        ASSERT(bp->b_maps == NULL);
 121        bp->b_map_count = map_count;
 122
 123        if (map_count == 1) {
 124                bp->b_maps = &bp->__b_map;
 125                return 0;
 126        }
 127
 128        bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
 129                                KM_NOFS);
 130        if (!bp->b_maps)
 131                return -ENOMEM;
 132        return 0;
 133}
 134
 135/*
  136 *      Frees b_maps if it was allocated.
 137 */
 138static void
 139xfs_buf_free_maps(
 140        struct xfs_buf  *bp)
 141{
 142        if (bp->b_maps != &bp->__b_map) {
 143                kmem_free(bp->b_maps);
 144                bp->b_maps = NULL;
 145        }
 146}
 147
 148struct xfs_buf *
 149_xfs_buf_alloc(
 150        struct xfs_buftarg      *target,
 151        struct xfs_buf_map      *map,
 152        int                     nmaps,
 153        xfs_buf_flags_t         flags)
 154{
 155        struct xfs_buf          *bp;
 156        int                     error;
 157        int                     i;
 158
 159        bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
 160        if (unlikely(!bp))
 161                return NULL;
 162
 163        /*
 164         * We don't want certain flags to appear in b_flags unless they are
 165         * specifically set by later operations on the buffer.
 166         */
 167        flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 168
 169        atomic_set(&bp->b_hold, 1);
 170        atomic_set(&bp->b_lru_ref, 1);
 171        init_completion(&bp->b_iowait);
 172        INIT_LIST_HEAD(&bp->b_lru);
 173        INIT_LIST_HEAD(&bp->b_list);
 174        RB_CLEAR_NODE(&bp->b_rbnode);
 175        sema_init(&bp->b_sema, 0); /* held, no waiters */
 176        spin_lock_init(&bp->b_lock);
 177        XB_SET_OWNER(bp);
 178        bp->b_target = target;
 179        bp->b_flags = flags;
 180
 181        /*
 182         * Set length and io_length to the same value initially.
 183         * I/O routines should use io_length, which will be the same in
 184         * most cases but may be reset (e.g. XFS recovery).
 185         */
 186        error = xfs_buf_get_maps(bp, nmaps);
 187        if (error)  {
 188                kmem_zone_free(xfs_buf_zone, bp);
 189                return NULL;
 190        }
 191
 192        bp->b_bn = map[0].bm_bn;
 193        bp->b_length = 0;
 194        for (i = 0; i < nmaps; i++) {
 195                bp->b_maps[i].bm_bn = map[i].bm_bn;
 196                bp->b_maps[i].bm_len = map[i].bm_len;
 197                bp->b_length += map[i].bm_len;
 198        }
 199        bp->b_io_length = bp->b_length;
 200
 201        atomic_set(&bp->b_pin_count, 0);
 202        init_waitqueue_head(&bp->b_waiters);
 203
 204        XFS_STATS_INC(target->bt_mount, xb_create);
 205        trace_xfs_buf_init(bp, _RET_IP_);
 206
 207        return bp;
 208}
 209
 210/*
 211 *      Allocate a page array capable of holding a specified number
 212 *      of pages, and point the page buf at it.
 213 */
 214STATIC int
 215_xfs_buf_get_pages(
 216        xfs_buf_t               *bp,
 217        int                     page_count)
 218{
 219        /* Make sure that we have a page list */
 220        if (bp->b_pages == NULL) {
 221                bp->b_page_count = page_count;
 222                if (page_count <= XB_PAGES) {
 223                        bp->b_pages = bp->b_page_array;
 224                } else {
 225                        bp->b_pages = kmem_alloc(sizeof(struct page *) *
 226                                                 page_count, KM_NOFS);
 227                        if (bp->b_pages == NULL)
 228                                return -ENOMEM;
 229                }
 230                memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
 231        }
 232        return 0;
 233}
 234
 235/*
 236 *      Frees b_pages if it was allocated.
 237 */
 238STATIC void
 239_xfs_buf_free_pages(
 240        xfs_buf_t       *bp)
 241{
 242        if (bp->b_pages != bp->b_page_array) {
 243                kmem_free(bp->b_pages);
 244                bp->b_pages = NULL;
 245        }
 246}
 247
 248/*
 249 *      Releases the specified buffer.
 250 *
 251 *      The modification state of any associated pages is left unchanged.
 252 *      The buffer must not be on any hash - use xfs_buf_rele instead for
  253 *      hashed and refcounted buffers.
 254 */
 255void
 256xfs_buf_free(
 257        xfs_buf_t               *bp)
 258{
 259        trace_xfs_buf_free(bp, _RET_IP_);
 260
 261        ASSERT(list_empty(&bp->b_lru));
 262
 263        if (bp->b_flags & _XBF_PAGES) {
 264                uint            i;
 265
 266                if (xfs_buf_is_vmapped(bp))
 267                        vm_unmap_ram(bp->b_addr - bp->b_offset,
 268                                        bp->b_page_count);
 269
 270                for (i = 0; i < bp->b_page_count; i++) {
 271                        struct page     *page = bp->b_pages[i];
 272
 273                        __free_page(page);
 274                }
 275        } else if (bp->b_flags & _XBF_KMEM)
 276                kmem_free(bp->b_addr);
 277        _xfs_buf_free_pages(bp);
 278        xfs_buf_free_maps(bp);
 279        kmem_zone_free(xfs_buf_zone, bp);
 280}
 281
 282/*
  283 * Allocates all the pages for the buffer in question and builds its page list.
 284 */
 285STATIC int
 286xfs_buf_allocate_memory(
 287        xfs_buf_t               *bp,
 288        uint                    flags)
 289{
 290        size_t                  size;
 291        size_t                  nbytes, offset;
 292        gfp_t                   gfp_mask = xb_to_gfp(flags);
 293        unsigned short          page_count, i;
 294        xfs_off_t               start, end;
 295        int                     error;
 296
 297        /*
 298         * for buffers that are contained within a single page, just allocate
 299         * the memory from the heap - there's no need for the complexity of
 300         * page arrays to keep allocation down to order 0.
 301         */
 302        size = BBTOB(bp->b_length);
 303        if (size < PAGE_SIZE) {
 304                bp->b_addr = kmem_alloc(size, KM_NOFS);
 305                if (!bp->b_addr) {
 306                        /* low memory - use alloc_page loop instead */
 307                        goto use_alloc_page;
 308                }
 309
 310                if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
 311                    ((unsigned long)bp->b_addr & PAGE_MASK)) {
 312                        /* b_addr spans two pages - use alloc_page instead */
 313                        kmem_free(bp->b_addr);
 314                        bp->b_addr = NULL;
 315                        goto use_alloc_page;
 316                }
 317                bp->b_offset = offset_in_page(bp->b_addr);
 318                bp->b_pages = bp->b_page_array;
 319                bp->b_pages[0] = virt_to_page(bp->b_addr);
 320                bp->b_page_count = 1;
 321                bp->b_flags |= _XBF_KMEM;
 322                return 0;
 323        }
 324
 325use_alloc_page:
 326        start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
 327        end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
 328                                                                >> PAGE_SHIFT;
 329        page_count = end - start;
 330        error = _xfs_buf_get_pages(bp, page_count);
 331        if (unlikely(error))
 332                return error;
 333
 334        offset = bp->b_offset;
 335        bp->b_flags |= _XBF_PAGES;
 336
 337        for (i = 0; i < bp->b_page_count; i++) {
 338                struct page     *page;
 339                uint            retries = 0;
 340retry:
 341                page = alloc_page(gfp_mask);
 342                if (unlikely(page == NULL)) {
 343                        if (flags & XBF_READ_AHEAD) {
 344                                bp->b_page_count = i;
 345                                error = -ENOMEM;
 346                                goto out_free_pages;
 347                        }
 348
 349                        /*
 350                         * This could deadlock.
 351                         *
 352                         * But until all the XFS lowlevel code is revamped to
 353                         * handle buffer allocation failures we can't do much.
 354                         */
 355                        if (!(++retries % 100))
 356                                xfs_err(NULL,
 357                "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
 358                                        current->comm, current->pid,
 359                                        __func__, gfp_mask);
 360
 361                        XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
 362                        congestion_wait(BLK_RW_ASYNC, HZ/50);
 363                        goto retry;
 364                }
 365
 366                XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);
 367
 368                nbytes = min_t(size_t, size, PAGE_SIZE - offset);
 369                size -= nbytes;
 370                bp->b_pages[i] = page;
 371                offset = 0;
 372        }
 373        return 0;
 374
 375out_free_pages:
 376        for (i = 0; i < bp->b_page_count; i++)
 377                __free_page(bp->b_pages[i]);
 378        return error;
 379}
 380
 381/*
 382 *      Map buffer into kernel address-space if necessary.
 383 */
 384STATIC int
 385_xfs_buf_map_pages(
 386        xfs_buf_t               *bp,
 387        uint                    flags)
 388{
 389        ASSERT(bp->b_flags & _XBF_PAGES);
 390        if (bp->b_page_count == 1) {
 391                /* A single page buffer is always mappable */
 392                bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 393        } else if (flags & XBF_UNMAPPED) {
 394                bp->b_addr = NULL;
 395        } else {
 396                int retried = 0;
 397                unsigned noio_flag;
 398
 399                /*
  400                 * vm_map_ram() will allocate auxiliary structures (e.g.
 401                 * pagetables) with GFP_KERNEL, yet we are likely to be under
 402                 * GFP_NOFS context here. Hence we need to tell memory reclaim
 403                 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
 404                 * memory reclaim re-entering the filesystem here and
 405                 * potentially deadlocking.
 406                 */
 407                noio_flag = memalloc_noio_save();
 408                do {
 409                        bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
 410                                                -1, PAGE_KERNEL);
 411                        if (bp->b_addr)
 412                                break;
 413                        vm_unmap_aliases();
 414                } while (retried++ <= 1);
 415                memalloc_noio_restore(noio_flag);
 416
 417                if (!bp->b_addr)
 418                        return -ENOMEM;
 419                bp->b_addr += bp->b_offset;
 420        }
 421
 422        return 0;
 423}
 424
 425/*
 426 *      Finding and Reading Buffers
 427 */
 428
 429/*
  430 *      Looks up, and creates if absent, a lockable buffer for
  431 *      a given range of the buffer target.  The buffer is returned
  432 *      locked. No I/O is implied by this call.
 433 */
 434xfs_buf_t *
 435_xfs_buf_find(
 436        struct xfs_buftarg      *btp,
 437        struct xfs_buf_map      *map,
 438        int                     nmaps,
 439        xfs_buf_flags_t         flags,
 440        xfs_buf_t               *new_bp)
 441{
 442        struct xfs_perag        *pag;
 443        struct rb_node          **rbp;
 444        struct rb_node          *parent;
 445        xfs_buf_t               *bp;
 446        xfs_daddr_t             blkno = map[0].bm_bn;
 447        xfs_daddr_t             eofs;
 448        int                     numblks = 0;
 449        int                     i;
 450
 451        for (i = 0; i < nmaps; i++)
 452                numblks += map[i].bm_len;
 453
 454        /* Check for IOs smaller than the sector size / not sector aligned */
 455        ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
 456        ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
 457
 458        /*
 459         * Corrupted block numbers can get through to here, unfortunately, so we
 460         * have to check that the buffer falls within the filesystem bounds.
 461         */
 462        eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
 463        if (blkno < 0 || blkno >= eofs) {
 464                /*
 465                 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
 466                 * but none of the higher level infrastructure supports
 467                 * returning a specific error on buffer lookup failures.
 468                 */
 469                xfs_alert(btp->bt_mount,
 470                          "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
 471                          __func__, blkno, eofs);
 472                WARN_ON(1);
 473                return NULL;
 474        }
 475
 476        /* get tree root */
 477        pag = xfs_perag_get(btp->bt_mount,
 478                                xfs_daddr_to_agno(btp->bt_mount, blkno));
 479
 480        /* walk tree */
 481        spin_lock(&pag->pag_buf_lock);
 482        rbp = &pag->pag_buf_tree.rb_node;
 483        parent = NULL;
 484        bp = NULL;
 485        while (*rbp) {
 486                parent = *rbp;
 487                bp = rb_entry(parent, struct xfs_buf, b_rbnode);
 488
 489                if (blkno < bp->b_bn)
 490                        rbp = &(*rbp)->rb_left;
 491                else if (blkno > bp->b_bn)
 492                        rbp = &(*rbp)->rb_right;
 493                else {
 494                        /*
 495                         * found a block number match. If the range doesn't
 496                         * match, the only way this is allowed is if the buffer
 497                         * in the cache is stale and the transaction that made
 498                         * it stale has not yet committed. i.e. we are
 499                         * reallocating a busy extent. Skip this buffer and
 500                         * continue searching to the right for an exact match.
 501                         */
 502                        if (bp->b_length != numblks) {
 503                                ASSERT(bp->b_flags & XBF_STALE);
 504                                rbp = &(*rbp)->rb_right;
 505                                continue;
 506                        }
 507                        atomic_inc(&bp->b_hold);
 508                        goto found;
 509                }
 510        }
 511
 512        /* No match found */
 513        if (new_bp) {
 514                rb_link_node(&new_bp->b_rbnode, parent, rbp);
 515                rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
 516                /* the buffer keeps the perag reference until it is freed */
 517                new_bp->b_pag = pag;
 518                spin_unlock(&pag->pag_buf_lock);
 519        } else {
 520                XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 521                spin_unlock(&pag->pag_buf_lock);
 522                xfs_perag_put(pag);
 523        }
 524        return new_bp;
 525
 526found:
 527        spin_unlock(&pag->pag_buf_lock);
 528        xfs_perag_put(pag);
 529
 530        if (!xfs_buf_trylock(bp)) {
 531                if (flags & XBF_TRYLOCK) {
 532                        xfs_buf_rele(bp);
 533                        XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
 534                        return NULL;
 535                }
 536                xfs_buf_lock(bp);
 537                XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
 538        }
 539
 540        /*
 541         * if the buffer is stale, clear all the external state associated with
 542         * it. We need to keep flags such as how we allocated the buffer memory
 543         * intact here.
 544         */
 545        if (bp->b_flags & XBF_STALE) {
 546                ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
 547                ASSERT(bp->b_iodone == NULL);
 548                bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
 549                bp->b_ops = NULL;
 550        }
 551
 552        trace_xfs_buf_find(bp, flags, _RET_IP_);
 553        XFS_STATS_INC(btp->bt_mount, xb_get_locked);
 554        return bp;
 555}
 556
 557/*
 558 * Assembles a buffer covering the specified range. The code is optimised for
 559 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 560 * more hits than misses.
 561 */
 562struct xfs_buf *
 563xfs_buf_get_map(
 564        struct xfs_buftarg      *target,
 565        struct xfs_buf_map      *map,
 566        int                     nmaps,
 567        xfs_buf_flags_t         flags)
 568{
 569        struct xfs_buf          *bp;
 570        struct xfs_buf          *new_bp;
 571        int                     error = 0;
 572
 573        bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
 574        if (likely(bp))
 575                goto found;
 576
 577        new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
 578        if (unlikely(!new_bp))
 579                return NULL;
 580
 581        error = xfs_buf_allocate_memory(new_bp, flags);
 582        if (error) {
 583                xfs_buf_free(new_bp);
 584                return NULL;
 585        }
 586
 587        bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
 588        if (!bp) {
 589                xfs_buf_free(new_bp);
 590                return NULL;
 591        }
 592
 593        if (bp != new_bp)
 594                xfs_buf_free(new_bp);
 595
 596found:
 597        if (!bp->b_addr) {
 598                error = _xfs_buf_map_pages(bp, flags);
 599                if (unlikely(error)) {
 600                        xfs_warn(target->bt_mount,
  601                                "%s: failed to map pages\n", __func__);
 602                        xfs_buf_relse(bp);
 603                        return NULL;
 604                }
 605        }
 606
 607        /*
 608         * Clear b_error if this is a lookup from a caller that doesn't expect
 609         * valid data to be found in the buffer.
 610         */
 611        if (!(flags & XBF_READ))
 612                xfs_buf_ioerror(bp, 0);
 613
 614        XFS_STATS_INC(target->bt_mount, xb_get);
 615        trace_xfs_buf_get(bp, flags, _RET_IP_);
 616        return bp;
 617}
 618
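/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * a minimal cached get/modify/release cycle through xfs_buf_get_map(). The
 * names btp, blkno and numblks stand for caller-provided state.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf		*bp;
 *
 *	bp = xfs_buf_get_map(btp, &map, 1, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	memset(bp->b_addr, 0, BBTOB(bp->b_length));
 *	xfs_buf_relse(bp);		(unlocks and drops the reference)
 *
 * Most callers reach this through the single-map convenience wrappers
 * declared in xfs_buf.h.
 */
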
 619STATIC int
 620_xfs_buf_read(
 621        xfs_buf_t               *bp,
 622        xfs_buf_flags_t         flags)
 623{
 624        ASSERT(!(flags & XBF_WRITE));
 625        ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 626
 627        bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
 628        bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
 629
 630        if (flags & XBF_ASYNC) {
 631                xfs_buf_submit(bp);
 632                return 0;
 633        }
 634        return xfs_buf_submit_wait(bp);
 635}
 636
 637xfs_buf_t *
 638xfs_buf_read_map(
 639        struct xfs_buftarg      *target,
 640        struct xfs_buf_map      *map,
 641        int                     nmaps,
 642        xfs_buf_flags_t         flags,
 643        const struct xfs_buf_ops *ops)
 644{
 645        struct xfs_buf          *bp;
 646
 647        flags |= XBF_READ;
 648
 649        bp = xfs_buf_get_map(target, map, nmaps, flags);
 650        if (bp) {
 651                trace_xfs_buf_read(bp, flags, _RET_IP_);
 652
 653                if (!(bp->b_flags & XBF_DONE)) {
 654                        XFS_STATS_INC(target->bt_mount, xb_get_read);
 655                        bp->b_ops = ops;
 656                        _xfs_buf_read(bp, flags);
 657                } else if (flags & XBF_ASYNC) {
 658                        /*
 659                         * Read ahead call which is already satisfied,
 660                         * drop the buffer
 661                         */
 662                        xfs_buf_relse(bp);
 663                        return NULL;
 664                } else {
 665                        /* We do not want read in the flags */
 666                        bp->b_flags &= ~XBF_READ;
 667                }
 668        }
 669
 670        return bp;
 671}
 672
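/*
 * Illustrative sketch (added, not part of the original source): a typical
 * verified metadata read. The verifier ops pointer is whatever matches the
 * block being read; xfs_sb_buf_ops is used here purely as an example. Note
 * that a synchronous read reports failure through bp->b_error.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf		*bp;
 *	int			error;
 *
 *	bp = xfs_buf_read_map(btp, &map, 1, 0, &xfs_sb_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	(use the verified contents, then xfs_buf_relse(bp))
 */
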
 673/*
 674 *      If we are not low on memory then do the readahead in a deadlock
 675 *      safe manner.
 676 */
 677void
 678xfs_buf_readahead_map(
 679        struct xfs_buftarg      *target,
 680        struct xfs_buf_map      *map,
 681        int                     nmaps,
 682        const struct xfs_buf_ops *ops)
 683{
 684        if (bdi_read_congested(target->bt_bdi))
 685                return;
 686
 687        xfs_buf_read_map(target, map, nmaps,
 688                     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
 689}
 690
 691/*
 692 * Read an uncached buffer from disk. Allocates and returns a locked
 693 * buffer containing the disk contents or nothing.
 694 */
 695int
 696xfs_buf_read_uncached(
 697        struct xfs_buftarg      *target,
 698        xfs_daddr_t             daddr,
 699        size_t                  numblks,
 700        int                     flags,
 701        struct xfs_buf          **bpp,
 702        const struct xfs_buf_ops *ops)
 703{
 704        struct xfs_buf          *bp;
 705
 706        *bpp = NULL;
 707
 708        bp = xfs_buf_get_uncached(target, numblks, flags);
 709        if (!bp)
 710                return -ENOMEM;
 711
 712        /* set up the buffer for a read IO */
 713        ASSERT(bp->b_map_count == 1);
 714        bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
 715        bp->b_maps[0].bm_bn = daddr;
 716        bp->b_flags |= XBF_READ;
 717        bp->b_ops = ops;
 718
 719        xfs_buf_submit_wait(bp);
 720        if (bp->b_error) {
 721                int     error = bp->b_error;
 722                xfs_buf_relse(bp);
 723                return error;
 724        }
 725
 726        *bpp = bp;
 727        return 0;
 728}
 729
 730/*
  731 * Return a buffer allocated as an empty buffer and associated with external
  732 * memory via xfs_buf_associate_memory() back to its empty state.
 733 */
 734void
 735xfs_buf_set_empty(
 736        struct xfs_buf          *bp,
 737        size_t                  numblks)
 738{
 739        if (bp->b_pages)
 740                _xfs_buf_free_pages(bp);
 741
 742        bp->b_pages = NULL;
 743        bp->b_page_count = 0;
 744        bp->b_addr = NULL;
 745        bp->b_length = numblks;
 746        bp->b_io_length = numblks;
 747
 748        ASSERT(bp->b_map_count == 1);
 749        bp->b_bn = XFS_BUF_DADDR_NULL;
 750        bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
 751        bp->b_maps[0].bm_len = bp->b_length;
 752}
 753
 754static inline struct page *
 755mem_to_page(
 756        void                    *addr)
 757{
 758        if ((!is_vmalloc_addr(addr))) {
 759                return virt_to_page(addr);
 760        } else {
 761                return vmalloc_to_page(addr);
 762        }
 763}
 764
 765int
 766xfs_buf_associate_memory(
 767        xfs_buf_t               *bp,
 768        void                    *mem,
 769        size_t                  len)
 770{
 771        int                     rval;
 772        int                     i = 0;
 773        unsigned long           pageaddr;
 774        unsigned long           offset;
 775        size_t                  buflen;
 776        int                     page_count;
 777
 778        pageaddr = (unsigned long)mem & PAGE_MASK;
 779        offset = (unsigned long)mem - pageaddr;
 780        buflen = PAGE_ALIGN(len + offset);
 781        page_count = buflen >> PAGE_SHIFT;
 782
 783        /* Free any previous set of page pointers */
 784        if (bp->b_pages)
 785                _xfs_buf_free_pages(bp);
 786
 787        bp->b_pages = NULL;
 788        bp->b_addr = mem;
 789
 790        rval = _xfs_buf_get_pages(bp, page_count);
 791        if (rval)
 792                return rval;
 793
 794        bp->b_offset = offset;
 795
 796        for (i = 0; i < bp->b_page_count; i++) {
 797                bp->b_pages[i] = mem_to_page((void *)pageaddr);
 798                pageaddr += PAGE_SIZE;
 799        }
 800
 801        bp->b_io_length = BTOBB(len);
 802        bp->b_length = BTOBB(buflen);
 803
 804        return 0;
 805}
 806
 807xfs_buf_t *
 808xfs_buf_get_uncached(
 809        struct xfs_buftarg      *target,
 810        size_t                  numblks,
 811        int                     flags)
 812{
 813        unsigned long           page_count;
 814        int                     error, i;
 815        struct xfs_buf          *bp;
 816        DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 817
 818        bp = _xfs_buf_alloc(target, &map, 1, 0);
 819        if (unlikely(bp == NULL))
 820                goto fail;
 821
 822        page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
 823        error = _xfs_buf_get_pages(bp, page_count);
 824        if (error)
 825                goto fail_free_buf;
 826
 827        for (i = 0; i < page_count; i++) {
 828                bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
 829                if (!bp->b_pages[i])
 830                        goto fail_free_mem;
 831        }
 832        bp->b_flags |= _XBF_PAGES;
 833
 834        error = _xfs_buf_map_pages(bp, 0);
 835        if (unlikely(error)) {
 836                xfs_warn(target->bt_mount,
 837                        "%s: failed to map pages", __func__);
 838                goto fail_free_mem;
 839        }
 840
 841        trace_xfs_buf_get_uncached(bp, _RET_IP_);
 842        return bp;
 843
 844 fail_free_mem:
 845        while (--i >= 0)
 846                __free_page(bp->b_pages[i]);
 847        _xfs_buf_free_pages(bp);
 848 fail_free_buf:
 849        xfs_buf_free_maps(bp);
 850        kmem_zone_free(xfs_buf_zone, bp);
 851 fail:
 852        return NULL;
 853}
 854
 855/*
 856 *      Increment reference count on buffer, to hold the buffer concurrently
 857 *      with another thread which may release (free) the buffer asynchronously.
 858 *      Must hold the buffer already to call this function.
 859 */
 860void
 861xfs_buf_hold(
 862        xfs_buf_t               *bp)
 863{
 864        trace_xfs_buf_hold(bp, _RET_IP_);
 865        atomic_inc(&bp->b_hold);
 866}
 867
 868/*
 869 *      Releases a hold on the specified buffer.  If the
  870 *      hold count is 1, calls xfs_buf_free.
 871 */
 872void
 873xfs_buf_rele(
 874        xfs_buf_t               *bp)
 875{
 876        struct xfs_perag        *pag = bp->b_pag;
 877
 878        trace_xfs_buf_rele(bp, _RET_IP_);
 879
 880        if (!pag) {
 881                ASSERT(list_empty(&bp->b_lru));
 882                ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 883                if (atomic_dec_and_test(&bp->b_hold))
 884                        xfs_buf_free(bp);
 885                return;
 886        }
 887
 888        ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
 889
 890        ASSERT(atomic_read(&bp->b_hold) > 0);
 891        if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
 892                spin_lock(&bp->b_lock);
 893                if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
 894                        /*
 895                         * If the buffer is added to the LRU take a new
 896                         * reference to the buffer for the LRU and clear the
 897                         * (now stale) dispose list state flag
 898                         */
 899                        if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
 900                                bp->b_state &= ~XFS_BSTATE_DISPOSE;
 901                                atomic_inc(&bp->b_hold);
 902                        }
 903                        spin_unlock(&bp->b_lock);
 904                        spin_unlock(&pag->pag_buf_lock);
 905                } else {
 906                        /*
 907                         * most of the time buffers will already be removed from
 908                         * the LRU, so optimise that case by checking for the
 909                         * XFS_BSTATE_DISPOSE flag indicating the last list the
 910                         * buffer was on was the disposal list
 911                         */
 912                        if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
 913                                list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
 914                        } else {
 915                                ASSERT(list_empty(&bp->b_lru));
 916                        }
 917                        spin_unlock(&bp->b_lock);
 918
 919                        ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
 920                        rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
 921                        spin_unlock(&pag->pag_buf_lock);
 922                        xfs_perag_put(pag);
 923                        xfs_buf_free(bp);
 924                }
 925        }
 926}
 927
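/*
 * Note on lifetimes: dropping the last hold on a cached (hashed) buffer in
 * xfs_buf_rele() does not normally free it. If the buffer is not stale and
 * b_lru_ref is still set, it is parked on the buftarg LRU with its own hold,
 * and is only torn down later by the shrinker or by xfs_wait_buftarg().
 */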
 928
 929/*
 930 *      Lock a buffer object, if it is not already locked.
 931 *
 932 *      If we come across a stale, pinned, locked buffer, we know that we are
 933 *      being asked to lock a buffer that has been reallocated. Because it is
 934 *      pinned, we know that the log has not been pushed to disk and hence it
 935 *      will still be locked.  Rather than continuing to have trylock attempts
 936 *      fail until someone else pushes the log, push it ourselves before
 937 *      returning.  This means that the xfsaild will not get stuck trying
 938 *      to push on stale inode buffers.
 939 */
 940int
 941xfs_buf_trylock(
 942        struct xfs_buf          *bp)
 943{
 944        int                     locked;
 945
 946        locked = down_trylock(&bp->b_sema) == 0;
 947        if (locked)
 948                XB_SET_OWNER(bp);
 949
 950        trace_xfs_buf_trylock(bp, _RET_IP_);
 951        return locked;
 952}
 953
 954/*
 955 *      Lock a buffer object.
 956 *
 957 *      If we come across a stale, pinned, locked buffer, we know that we
 958 *      are being asked to lock a buffer that has been reallocated. Because
 959 *      it is pinned, we know that the log has not been pushed to disk and
 960 *      hence it will still be locked. Rather than sleeping until someone
 961 *      else pushes the log, push it ourselves before trying to get the lock.
 962 */
 963void
 964xfs_buf_lock(
 965        struct xfs_buf          *bp)
 966{
 967        trace_xfs_buf_lock(bp, _RET_IP_);
 968
 969        if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
 970                xfs_log_force(bp->b_target->bt_mount, 0);
 971        down(&bp->b_sema);
 972        XB_SET_OWNER(bp);
 973
 974        trace_xfs_buf_lock_done(bp, _RET_IP_);
 975}
 976
 977void
 978xfs_buf_unlock(
 979        struct xfs_buf          *bp)
 980{
 981        XB_CLEAR_OWNER(bp);
 982        up(&bp->b_sema);
 983
 984        trace_xfs_buf_unlock(bp, _RET_IP_);
 985}
 986
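/*
 * b_sema is a counting semaphore rather than a mutex so that buffer lock
 * ownership can legitimately move between contexts: async submission in
 * xfs_buf_submit() hands the locked buffer to IO completion, which unlocks
 * it from a workqueue rather than from the task that called xfs_buf_lock().
 */
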
 987STATIC void
 988xfs_buf_wait_unpin(
 989        xfs_buf_t               *bp)
 990{
 991        DECLARE_WAITQUEUE       (wait, current);
 992
 993        if (atomic_read(&bp->b_pin_count) == 0)
 994                return;
 995
 996        add_wait_queue(&bp->b_waiters, &wait);
 997        for (;;) {
 998                set_current_state(TASK_UNINTERRUPTIBLE);
 999                if (atomic_read(&bp->b_pin_count) == 0)
1000                        break;
1001                io_schedule();
1002        }
1003        remove_wait_queue(&bp->b_waiters, &wait);
1004        set_current_state(TASK_RUNNING);
1005}
1006
1007/*
1008 *      Buffer Utility Routines
1009 */
1010
1011void
1012xfs_buf_ioend(
1013        struct xfs_buf  *bp)
1014{
1015        bool            read = bp->b_flags & XBF_READ;
1016
1017        trace_xfs_buf_iodone(bp, _RET_IP_);
1018
1019        bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1020
1021        /*
1022         * Pull in IO completion errors now. We are guaranteed to be running
1023         * single threaded, so we don't need the lock to read b_io_error.
1024         */
1025        if (!bp->b_error && bp->b_io_error)
1026                xfs_buf_ioerror(bp, bp->b_io_error);
1027
1028        /* Only validate buffers that were read without errors */
1029        if (read && !bp->b_error && bp->b_ops) {
1030                ASSERT(!bp->b_iodone);
1031                bp->b_ops->verify_read(bp);
1032        }
1033
1034        if (!bp->b_error)
1035                bp->b_flags |= XBF_DONE;
1036
1037        if (bp->b_iodone)
1038                (*(bp->b_iodone))(bp);
1039        else if (bp->b_flags & XBF_ASYNC)
1040                xfs_buf_relse(bp);
1041        else
1042                complete(&bp->b_iowait);
1043}
1044
1045static void
1046xfs_buf_ioend_work(
1047        struct work_struct      *work)
1048{
1049        struct xfs_buf          *bp =
1050                container_of(work, xfs_buf_t, b_ioend_work);
1051
1052        xfs_buf_ioend(bp);
1053}
1054
1055static void
1056xfs_buf_ioend_async(
1057        struct xfs_buf  *bp)
1058{
1059        INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1060        queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
1061}
1062
1063void
1064xfs_buf_ioerror(
1065        xfs_buf_t               *bp,
1066        int                     error)
1067{
1068        ASSERT(error <= 0 && error >= -1000);
1069        bp->b_error = error;
1070        trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1071}
1072
1073void
1074xfs_buf_ioerror_alert(
1075        struct xfs_buf          *bp,
1076        const char              *func)
1077{
1078        xfs_alert(bp->b_target->bt_mount,
1079"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1080                (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
1081}
1082
1083int
1084xfs_bwrite(
1085        struct xfs_buf          *bp)
1086{
1087        int                     error;
1088
1089        ASSERT(xfs_buf_islocked(bp));
1090
1091        bp->b_flags |= XBF_WRITE;
1092        bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1093                         XBF_WRITE_FAIL | XBF_DONE);
1094
1095        error = xfs_buf_submit_wait(bp);
1096        if (error) {
1097                xfs_force_shutdown(bp->b_target->bt_mount,
1098                                   SHUTDOWN_META_IO_ERROR);
1099        }
1100        return error;
1101}
1102
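/*
 * Illustrative sketch (added, not part of the original source): synchronous
 * rewrite of an already-held, locked buffer. On error xfs_bwrite() has
 * already shut the filesystem down; the caller still owns the lock and
 * reference and releases them as usual.
 *
 *	(modify the buffer contents)
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 *	return error;
 */
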
1103STATIC void
1104xfs_buf_bio_end_io(
1105        struct bio              *bio)
1106{
1107        xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1108
1109        /*
1110         * don't overwrite existing errors - otherwise we can lose errors on
1111         * buffers that require multiple bios to complete.
1112         */
1113        if (bio->bi_error) {
1114                spin_lock(&bp->b_lock);
1115                if (!bp->b_io_error)
1116                        bp->b_io_error = bio->bi_error;
1117                spin_unlock(&bp->b_lock);
1118        }
1119
1120        if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1121                invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1122
1123        if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1124                xfs_buf_ioend_async(bp);
1125        bio_put(bio);
1126}
1127
1128static void
1129xfs_buf_ioapply_map(
1130        struct xfs_buf  *bp,
1131        int             map,
1132        int             *buf_offset,
1133        int             *count,
1134        int             rw)
1135{
1136        int             page_index;
1137        int             total_nr_pages = bp->b_page_count;
1138        int             nr_pages;
1139        struct bio      *bio;
1140        sector_t        sector =  bp->b_maps[map].bm_bn;
1141        int             size;
1142        int             offset;
1143
1144        total_nr_pages = bp->b_page_count;
1145
1146        /* skip the pages in the buffer before the start offset */
1147        page_index = 0;
1148        offset = *buf_offset;
1149        while (offset >= PAGE_SIZE) {
1150                page_index++;
1151                offset -= PAGE_SIZE;
1152        }
1153
1154        /*
1155         * Limit the IO size to the length of the current vector, and update the
1156         * remaining IO count for the next time around.
1157         */
1158        size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1159        *count -= size;
1160        *buf_offset += size;
1161
1162next_chunk:
1163        atomic_inc(&bp->b_io_remaining);
1164        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1165        if (nr_pages > total_nr_pages)
1166                nr_pages = total_nr_pages;
1167
1168        bio = bio_alloc(GFP_NOIO, nr_pages);
1169        bio->bi_bdev = bp->b_target->bt_bdev;
1170        bio->bi_iter.bi_sector = sector;
1171        bio->bi_end_io = xfs_buf_bio_end_io;
1172        bio->bi_private = bp;
1173
1174
1175        for (; size && nr_pages; nr_pages--, page_index++) {
1176                int     rbytes, nbytes = PAGE_SIZE - offset;
1177
1178                if (nbytes > size)
1179                        nbytes = size;
1180
1181                rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1182                                      offset);
1183                if (rbytes < nbytes)
1184                        break;
1185
1186                offset = 0;
1187                sector += BTOBB(nbytes);
1188                size -= nbytes;
1189                total_nr_pages--;
1190        }
1191
1192        if (likely(bio->bi_iter.bi_size)) {
1193                if (xfs_buf_is_vmapped(bp)) {
1194                        flush_kernel_vmap_range(bp->b_addr,
1195                                                xfs_buf_vmap_len(bp));
1196                }
1197                submit_bio(rw, bio);
1198                if (size)
1199                        goto next_chunk;
1200        } else {
1201                /*
1202                 * This is guaranteed not to be the last io reference count
1203                 * because the caller (xfs_buf_submit) holds a count itself.
1204                 */
1205                atomic_dec(&bp->b_io_remaining);
1206                xfs_buf_ioerror(bp, -EIO);
1207                bio_put(bio);
1208        }
1209
1210}
1211
1212STATIC void
1213_xfs_buf_ioapply(
1214        struct xfs_buf  *bp)
1215{
1216        struct blk_plug plug;
1217        int             rw;
1218        int             offset;
1219        int             size;
1220        int             i;
1221
1222        /*
1223         * Make sure we capture only current IO errors rather than stale errors
1224         * left over from previous use of the buffer (e.g. failed readahead).
1225         */
1226        bp->b_error = 0;
1227
1228        /*
1229         * Initialize the I/O completion workqueue if we haven't yet or the
1230         * submitter has not opted to specify a custom one.
1231         */
1232        if (!bp->b_ioend_wq)
1233                bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
1234
1235        if (bp->b_flags & XBF_WRITE) {
1236                if (bp->b_flags & XBF_SYNCIO)
1237                        rw = WRITE_SYNC;
1238                else
1239                        rw = WRITE;
1240                if (bp->b_flags & XBF_FUA)
1241                        rw |= REQ_FUA;
1242                if (bp->b_flags & XBF_FLUSH)
1243                        rw |= REQ_FLUSH;
1244
1245                /*
1246                 * Run the write verifier callback function if it exists. If
1247                 * this function fails it will mark the buffer with an error and
1248                 * the IO should not be dispatched.
1249                 */
1250                if (bp->b_ops) {
1251                        bp->b_ops->verify_write(bp);
1252                        if (bp->b_error) {
1253                                xfs_force_shutdown(bp->b_target->bt_mount,
1254                                                   SHUTDOWN_CORRUPT_INCORE);
1255                                return;
1256                        }
1257                } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1258                        struct xfs_mount *mp = bp->b_target->bt_mount;
1259
1260                        /*
1261                         * non-crc filesystems don't attach verifiers during
1262                         * log recovery, so don't warn for such filesystems.
1263                         */
1264                        if (xfs_sb_version_hascrc(&mp->m_sb)) {
1265                                xfs_warn(mp,
1266                                        "%s: no ops on block 0x%llx/0x%x",
1267                                        __func__, bp->b_bn, bp->b_length);
1268                                xfs_hex_dump(bp->b_addr, 64);
1269                                dump_stack();
1270                        }
1271                }
1272        } else if (bp->b_flags & XBF_READ_AHEAD) {
1273                rw = READA;
1274        } else {
1275                rw = READ;
1276        }
1277
1278        /* we only use the buffer cache for meta-data */
1279        rw |= REQ_META;
1280
1281        /*
1282         * Walk all the vectors issuing IO on them. Set up the initial offset
1283         * into the buffer and the desired IO size before we start -
1284         * xfs_buf_ioapply_map() will modify them appropriately for each
1285         * subsequent call.
1286         */
1287        offset = bp->b_offset;
1288        size = BBTOB(bp->b_io_length);
1289        blk_start_plug(&plug);
1290        for (i = 0; i < bp->b_map_count; i++) {
1291                xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1292                if (bp->b_error)
1293                        break;
1294                if (size <= 0)
1295                        break;  /* all done */
1296        }
1297        blk_finish_plug(&plug);
1298}
1299
1300/*
1301 * Asynchronous IO submission path. This transfers the buffer lock ownership and
1302 * the current reference to the IO. It is not safe to reference the buffer after
1303 * a call to this function unless the caller holds an additional reference
1304 * itself.
1305 */
1306void
1307xfs_buf_submit(
1308        struct xfs_buf  *bp)
1309{
1310        trace_xfs_buf_submit(bp, _RET_IP_);
1311
1312        ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1313        ASSERT(bp->b_flags & XBF_ASYNC);
1314
1315        /* on shutdown we stale and complete the buffer immediately */
1316        if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1317                xfs_buf_ioerror(bp, -EIO);
1318                bp->b_flags &= ~XBF_DONE;
1319                xfs_buf_stale(bp);
1320                xfs_buf_ioend(bp);
1321                return;
1322        }
1323
1324        if (bp->b_flags & XBF_WRITE)
1325                xfs_buf_wait_unpin(bp);
1326
1327        /* clear the internal error state to avoid spurious errors */
1328        bp->b_io_error = 0;
1329
1330        /*
1331         * The caller's reference is released during I/O completion.
1332         * This occurs some time after the last b_io_remaining reference is
1333         * released, so after we drop our IO reference we have to have some
1334         * other reference to ensure the buffer doesn't go away from underneath
1335         * us. Take a direct reference to ensure we have safe access to the
1336         * buffer until we are finished with it.
1337         */
1338        xfs_buf_hold(bp);
1339
1340        /*
1341         * Set the count to 1 initially; this will stop an I/O completion
1342         * callout which happens before we have started all the I/O from calling
1343         * xfs_buf_ioend too early.
1344         */
1345        atomic_set(&bp->b_io_remaining, 1);
1346        _xfs_buf_ioapply(bp);
1347
1348        /*
1349         * If _xfs_buf_ioapply failed, we can get back here with only the IO
1350         * reference we took above. If we drop it to zero, run completion so
1351         * that we don't return to the caller with completion still pending.
1352         */
1353        if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1354                if (bp->b_error)
1355                        xfs_buf_ioend(bp);
1356                else
1357                        xfs_buf_ioend_async(bp);
1358        }
1359
1360        xfs_buf_rele(bp);
1361        /* Note: it is not safe to reference bp now we've dropped our ref */
1362}
1363
1364/*
1365 * Synchronous buffer IO submission path, read or write.
1366 */
1367int
1368xfs_buf_submit_wait(
1369        struct xfs_buf  *bp)
1370{
1371        int             error;
1372
1373        trace_xfs_buf_submit_wait(bp, _RET_IP_);
1374
1375        ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
1376
1377        if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1378                xfs_buf_ioerror(bp, -EIO);
1379                xfs_buf_stale(bp);
1380                bp->b_flags &= ~XBF_DONE;
1381                return -EIO;
1382        }
1383
1384        if (bp->b_flags & XBF_WRITE)
1385                xfs_buf_wait_unpin(bp);
1386
1387        /* clear the internal error state to avoid spurious errors */
1388        bp->b_io_error = 0;
1389
1390        /*
1391         * For synchronous IO, the IO does not inherit the submitter's reference
1392         * count, nor the buffer lock. Hence we cannot release the reference we
1393         * are about to take until we've waited for all IO completion to occur,
1394         * including any xfs_buf_ioend_async() work that may be pending.
1395         */
1396        xfs_buf_hold(bp);
1397
1398        /*
1399         * Set the count to 1 initially; this will stop an I/O completion
1400         * callout which happens before we have started all the I/O from calling
1401         * xfs_buf_ioend too early.
1402         */
1403        atomic_set(&bp->b_io_remaining, 1);
1404        _xfs_buf_ioapply(bp);
1405
1406        /*
1407         * make sure we run completion synchronously if it raced with us and is
1408         * already complete.
1409         */
1410        if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1411                xfs_buf_ioend(bp);
1412
1413        /* wait for completion before gathering the error from the buffer */
1414        trace_xfs_buf_iowait(bp, _RET_IP_);
1415        wait_for_completion(&bp->b_iowait);
1416        trace_xfs_buf_iowait_done(bp, _RET_IP_);
1417        error = bp->b_error;
1418
1419        /*
1420         * all done now, we can release the hold that keeps the buffer
1421         * referenced for the entire IO.
1422         */
1423        xfs_buf_rele(bp);
1424        return error;
1425}
1426
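/*
 * xfs_buf_offset() is the safe way to address data in a buffer that may be
 * unmapped (XBF_UNMAPPED): it resolves the page backing the given byte
 * offset directly. The returned pointer is only valid up to the end of that
 * page, so callers must not assume contiguity across page boundaries.
 */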
1427void *
1428xfs_buf_offset(
1429        struct xfs_buf          *bp,
1430        size_t                  offset)
1431{
1432        struct page             *page;
1433
1434        if (bp->b_addr)
1435                return bp->b_addr + offset;
1436
1437        offset += bp->b_offset;
1438        page = bp->b_pages[offset >> PAGE_SHIFT];
1439        return page_address(page) + (offset & (PAGE_SIZE-1));
1440}
1441
1442/*
1443 *      Move data into or out of a buffer.
1444 */
1445void
1446xfs_buf_iomove(
1447        xfs_buf_t               *bp,    /* buffer to process            */
1448        size_t                  boff,   /* starting buffer offset       */
1449        size_t                  bsize,  /* length to copy               */
1450        void                    *data,  /* data address                 */
1451        xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1452{
1453        size_t                  bend;
1454
1455        bend = boff + bsize;
1456        while (boff < bend) {
1457                struct page     *page;
1458                int             page_index, page_offset, csize;
1459
1460                page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1461                page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1462                page = bp->b_pages[page_index];
1463                csize = min_t(size_t, PAGE_SIZE - page_offset,
1464                                      BBTOB(bp->b_io_length) - boff);
1465
1466                ASSERT((csize + page_offset) <= PAGE_SIZE);
1467
1468                switch (mode) {
1469                case XBRW_ZERO:
1470                        memset(page_address(page) + page_offset, 0, csize);
1471                        break;
1472                case XBRW_READ:
1473                        memcpy(data, page_address(page) + page_offset, csize);
1474                        break;
1475                case XBRW_WRITE:
1476                        memcpy(page_address(page) + page_offset, data, csize);
1477                }
1478
1479                boff += csize;
1480                data += csize;
1481        }
1482}
1483
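/*
 * Illustrative sketch (added, not part of the original source): copying a
 * small header out of a buffer without assuming it is mapped contiguously.
 *
 *	char	hdr[64];
 *
 *	xfs_buf_iomove(bp, 0, sizeof(hdr), hdr, XBRW_READ);
 *
 * xfs_buf.h also wraps the XBRW_ZERO case as xfs_buf_zero().
 */
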
1484/*
1485 *      Handling of buffer targets (buftargs).
1486 */
1487
1488/*
1489 * Wait for any bufs with callbacks that have been submitted but have not yet
1490 * returned. These buffers will have an elevated hold count, so wait on those
1491 * while freeing all the buffers only held by the LRU.
1492 */
1493static enum lru_status
1494xfs_buftarg_wait_rele(
1495        struct list_head        *item,
1496        struct list_lru_one     *lru,
1497        spinlock_t              *lru_lock,
1498        void                    *arg)
1499
1500{
1501        struct xfs_buf          *bp = container_of(item, struct xfs_buf, b_lru);
1502        struct list_head        *dispose = arg;
1503
1504        if (atomic_read(&bp->b_hold) > 1) {
1505                /* need to wait, so skip it this pass */
1506                trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1507                return LRU_SKIP;
1508        }
1509        if (!spin_trylock(&bp->b_lock))
1510                return LRU_SKIP;
1511
1512        /*
1513         * clear the LRU reference count so the buffer doesn't get
1514         * ignored in xfs_buf_rele().
1515         */
1516        atomic_set(&bp->b_lru_ref, 0);
1517        bp->b_state |= XFS_BSTATE_DISPOSE;
1518        list_lru_isolate_move(lru, item, dispose);
1519        spin_unlock(&bp->b_lock);
1520        return LRU_REMOVED;
1521}
1522
1523void
1524xfs_wait_buftarg(
1525        struct xfs_buftarg      *btp)
1526{
1527        LIST_HEAD(dispose);
1528        int loop = 0;
1529
1530        /*
1531         * We need to flush the buffer workqueue to ensure that all IO
1532         * completion processing is 100% done. Just waiting on buffer locks is
1533         * not sufficient for async IO as the reference count held over IO is
1534         * not released until after the buffer lock is dropped. Hence we need to
1535         * ensure here that all reference counts have been dropped before we
1536         * start walking the LRU list.
1537         */
1538        drain_workqueue(btp->bt_mount->m_buf_workqueue);
1539
1540        /* loop until there is nothing left on the lru list. */
1541        while (list_lru_count(&btp->bt_lru)) {
1542                list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1543                              &dispose, LONG_MAX);
1544
1545                while (!list_empty(&dispose)) {
1546                        struct xfs_buf *bp;
1547                        bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1548                        list_del_init(&bp->b_lru);
1549                        if (bp->b_flags & XBF_WRITE_FAIL) {
1550                                xfs_alert(btp->bt_mount,
1551"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
1552                                        (long long)bp->b_bn);
1553                                xfs_alert(btp->bt_mount,
1554"Please run xfs_repair to determine the extent of the problem.");
1555                        }
1556                        xfs_buf_rele(bp);
1557                }
1558                if (loop++ != 0)
1559                        delay(100);
1560        }
1561}
1562
1563static enum lru_status
1564xfs_buftarg_isolate(
1565        struct list_head        *item,
1566        struct list_lru_one     *lru,
1567        spinlock_t              *lru_lock,
1568        void                    *arg)
1569{
1570        struct xfs_buf          *bp = container_of(item, struct xfs_buf, b_lru);
1571        struct list_head        *dispose = arg;
1572
1573        /*
1574         * We are inverting the normal lru_lock/bp->b_lock ordering here, so
1575         * use a trylock.  If we fail to get the lock, just skip the buffer.
1576         */
1577        if (!spin_trylock(&bp->b_lock))
1578                return LRU_SKIP;
1579        /*
1580         * Decrement the b_lru_ref count unless the value is already
1581         * zero. If the value is already zero, we need to reclaim the
1582         * buffer, otherwise it gets another trip through the LRU.
1583         */
1584        if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1585                spin_unlock(&bp->b_lock);
1586                return LRU_ROTATE;
1587        }
1588
1589        bp->b_state |= XFS_BSTATE_DISPOSE;
1590        list_lru_isolate_move(lru, item, dispose);
1591        spin_unlock(&bp->b_lock);
1592        return LRU_REMOVED;
1593}
1594
1595static unsigned long
1596xfs_buftarg_shrink_scan(
1597        struct shrinker         *shrink,
1598        struct shrink_control   *sc)
1599{
1600        struct xfs_buftarg      *btp = container_of(shrink,
1601                                        struct xfs_buftarg, bt_shrinker);
1602        LIST_HEAD(dispose);
1603        unsigned long           freed;
1604
1605        freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1606                                     xfs_buftarg_isolate, &dispose);
1607
1608        while (!list_empty(&dispose)) {
1609                struct xfs_buf *bp;
1610                bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1611                list_del_init(&bp->b_lru);
1612                xfs_buf_rele(bp);
1613        }
1614
1615        return freed;
1616}
1617
1618static unsigned long
1619xfs_buftarg_shrink_count(
1620        struct shrinker         *shrink,
1621        struct shrink_control   *sc)
1622{
1623        struct xfs_buftarg      *btp = container_of(shrink,
1624                                        struct xfs_buftarg, bt_shrinker);
1625        return list_lru_shrink_count(&btp->bt_lru, sc);
1626}
1627
1628void
1629xfs_free_buftarg(
1630        struct xfs_mount        *mp,
1631        struct xfs_buftarg      *btp)
1632{
1633        unregister_shrinker(&btp->bt_shrinker);
1634        list_lru_destroy(&btp->bt_lru);
1635
1636        if (mp->m_flags & XFS_MOUNT_BARRIER)
1637                xfs_blkdev_issue_flush(btp);
1638
1639        kmem_free(btp);
1640}
1641
1642int
1643xfs_setsize_buftarg(
1644        xfs_buftarg_t           *btp,
1645        unsigned int            sectorsize)
1646{
1647        /* Set up metadata sector size info */
1648        btp->bt_meta_sectorsize = sectorsize;
1649        btp->bt_meta_sectormask = sectorsize - 1;
1650
1651        if (set_blocksize(btp->bt_bdev, sectorsize)) {
1652                xfs_warn(btp->bt_mount,
1653                        "Cannot set_blocksize to %u on device %pg",
1654                        sectorsize, btp->bt_bdev);
1655                return -EINVAL;
1656        }
1657
1658        /* Set up device logical sector size mask */
1659        btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1660        btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1661
1662        return 0;
1663}
1664
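/*
 * Illustrative sketch of how the sector masks set up above are used: since
 * sector sizes are powers of two, the "size - 1" masks let callers check
 * alignment with a bitwise AND.  Direct I/O alignment checks elsewhere in
 * XFS follow roughly this pattern (variable names here are illustrative):
 *
 *	if ((offset | count) & target->bt_logical_sectormask)
 *		return -EINVAL;
 */
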
1665/*
1666 * When allocating the initial buffer target we have not yet
1667 * read in the superblock, so we don't know what size sectors
1668 * are being used at this early stage.  Play it safe.
1669 */
1670STATIC int
1671xfs_setsize_buftarg_early(
1672        xfs_buftarg_t           *btp,
1673        struct block_device     *bdev)
1674{
1675        return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1676}
1677
1678xfs_buftarg_t *
1679xfs_alloc_buftarg(
1680        struct xfs_mount        *mp,
1681        struct block_device     *bdev)
1682{
1683        xfs_buftarg_t           *btp;
1684
1685        btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1686
1687        btp->bt_mount = mp;
1688        btp->bt_dev = bdev->bd_dev;
1689        btp->bt_bdev = bdev;
1690        btp->bt_bdi = blk_get_backing_dev_info(bdev);
1691
1692        if (xfs_setsize_buftarg_early(btp, bdev))
1693                goto error;
1694
1695        if (list_lru_init(&btp->bt_lru))
1696                goto error;
1697
1698        btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1699        btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1700        btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1701        btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1702        register_shrinker(&btp->bt_shrinker);
1703        return btp;
1704
1705error:
1706        kmem_free(btp);
1707        return NULL;
1708}
1709
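/*
 * Illustrative sketch of xfs_alloc_buftarg() usage: mount-time setup
 * allocates one buffer target per underlying block device, roughly:
 *
 *	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
 *	if (!mp->m_ddev_targp)
 *		goto out_close_devices;
 *	if (logdev && logdev != ddev)
 *		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
 *
 * Error handling and the realtime device case are omitted; see
 * xfs_open_devices() in xfs_super.c for the real code.
 */
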
1710/*
1711 * Add a buffer to the delayed write list.
1712 *
1713 * This queues a buffer for writeout if it hasn't already been queued.  Note
1714 * that neither this routine nor the buffer list submission functions perform
1715 * any internal synchronization.  The lists are expected to be thread-local
1716 * to the callers.
1717 *
1718 * Returns true if we queued up the buffer, or false if it was already on
1719 * the buffer list.
1720 */
1721bool
1722xfs_buf_delwri_queue(
1723        struct xfs_buf          *bp,
1724        struct list_head        *list)
1725{
1726        ASSERT(xfs_buf_islocked(bp));
1727        ASSERT(!(bp->b_flags & XBF_READ));
1728
1729        /*
1730         * If the buffer is already marked delwri it has already been queued
1731         * up by someone else for immediate writeout.  Just ignore it in that
1732         * case.
1733         */
1734        if (bp->b_flags & _XBF_DELWRI_Q) {
1735                trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1736                return false;
1737        }
1738
1739        trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1740
1741        /*
1742         * If a buffer gets written out synchronously or marked stale while it
1743         * is on a delwri list we lazily remove it. To do this, the other party
1744         * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1745         * It remains referenced and on the list.  In a rare corner case it
1746         * might get re-added to a delwri list after the synchronous writeout,
1747         * in which case we just need to re-add the flag here.
1748         */
1749        bp->b_flags |= _XBF_DELWRI_Q;
1750        if (list_empty(&bp->b_list)) {
1751                atomic_inc(&bp->b_hold);
1752                list_add_tail(&bp->b_list, list);
1753        }
1754
1755        return true;
1756}
1757
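/*
 * Illustrative sketch of the delwri list API: a caller builds a private
 * list, queues locked buffers onto it, and submits the whole list in one
 * go.  Roughly:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_lock(bp);
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_unlock(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * Because the list is thread-local, no locking is needed around the list
 * itself; only the individual buffers must be locked while being queued.
 */
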
1758/*
1759 * The compare function is more complex than it needs to be because the
1760 * return value is only 32 bits and we are doing comparisons on 64 bit
1761 * values.
1762 */
1763static int
1764xfs_buf_cmp(
1765        void            *priv,
1766        struct list_head *a,
1767        struct list_head *b)
1768{
1769        struct xfs_buf  *ap = container_of(a, struct xfs_buf, b_list);
1770        struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
1771        xfs_daddr_t             diff;
1772
1773        diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1774        if (diff < 0)
1775                return -1;
1776        if (diff > 0)
1777                return 1;
1778        return 0;
1779}
1780
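/*
 * Worked example for xfs_buf_cmp() above: with a naive
 * "return ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;" a 64-bit difference
 * such as 0x100000000 (two buffers 2TB apart in 512-byte blocks) would be
 * truncated to a 32-bit return value of 0, making the buffers compare as
 * equal and breaking the sort order.  Hence the explicit -1/0/1 above.
 */
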
1781static int
1782__xfs_buf_delwri_submit(
1783        struct list_head        *buffer_list,
1784        struct list_head        *io_list,
1785        bool                    wait)
1786{
1787        struct blk_plug         plug;
1788        struct xfs_buf          *bp, *n;
1789        int                     pinned = 0;
1790
1791        list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1792                if (!wait) {
1793                        if (xfs_buf_ispinned(bp)) {
1794                                pinned++;
1795                                continue;
1796                        }
1797                        if (!xfs_buf_trylock(bp))
1798                                continue;
1799                } else {
1800                        xfs_buf_lock(bp);
1801                }
1802
1803                /*
1804                 * Someone else might have written the buffer synchronously or
1805                 * marked it stale in the meantime.  In that case only the
1806                 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1807                 * reference and remove it from the list here.
1808                 */
1809                if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1810                        list_del_init(&bp->b_list);
1811                        xfs_buf_relse(bp);
1812                        continue;
1813                }
1814
1815                list_move_tail(&bp->b_list, io_list);
1816                trace_xfs_buf_delwri_split(bp, _RET_IP_);
1817        }
1818
1819        list_sort(NULL, io_list, xfs_buf_cmp);
1820
1821        blk_start_plug(&plug);
1822        list_for_each_entry_safe(bp, n, io_list, b_list) {
1823                bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
1824                bp->b_flags |= XBF_WRITE | XBF_ASYNC;
1825
1826                /*
1827                 * We do all IO submission async.  This means if we need to
1828                 * wait for IO completion we need to take an extra reference
1829                 * so the buffer is still valid on the other side.
1830                 */
1831                if (wait)
1832                        xfs_buf_hold(bp);
1833                else
1834                        list_del_init(&bp->b_list);
1835
1836                xfs_buf_submit(bp);
1837        }
1838        blk_finish_plug(&plug);
1839
1840        return pinned;
1841}
1842
1843/*
1844 * Write out a buffer list asynchronously.
1845 *
1846 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1847 * out and not wait for I/O completion on any of the buffers.  This interface
1848 * is only safely usable for callers that can track I/O completion by higher
1849 * level means, e.g. AIL pushing, as the @buffer_list is consumed in this
1850 * function.
1851 */
1852int
1853xfs_buf_delwri_submit_nowait(
1854        struct list_head        *buffer_list)
1855{
1856        LIST_HEAD(io_list);
1857        return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1858}
1859
1860/*
1861 * Write out a buffer list synchronously.
1862 *
1863 * This will take the @buffer_list, write all buffers out and wait for I/O
1864 * completion on all of the buffers. @buffer_list is consumed by the function,
1865 * so callers must have some other way of tracking buffers if they require such
1866 * functionality.
1867 */
1868int
1869xfs_buf_delwri_submit(
1870        struct list_head        *buffer_list)
1871{
1872        LIST_HEAD(io_list);
1873        int                     error = 0, error2;
1874        struct xfs_buf          *bp;
1875
1876        __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1877
1878        /* Wait for IO to complete. */
1879        while (!list_empty(&io_list)) {
1880                bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1881
1882                list_del_init(&bp->b_list);
1883
1884        /* Locking the buffer will wait for async IO completion. */
1885                xfs_buf_lock(bp);
1886                error2 = bp->b_error;
1887                xfs_buf_relse(bp);
1888                if (!error)
1889                        error = error2;
1890        }
1891
1892        return error;
1893}
1894
1895int __init
1896xfs_buf_init(void)
1897{
1898        xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1899                                                KM_ZONE_HWALIGN, NULL);
1900        if (!xfs_buf_zone)
1901                goto out;
1902
1903        return 0;
1904
1905 out:
1906        return -ENOMEM;
1907}
1908
1909void
1910xfs_buf_terminate(void)
1911{
1912        kmem_zone_destroy(xfs_buf_zone);
1913}
1914
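/*
 * Illustrative sketch: the buffer zone is set up and torn down from the XFS
 * module init/exit paths, roughly:
 *
 *	error = xfs_buf_init();		(from init_xfs_fs())
 *	...
 *	xfs_buf_terminate();		(from exit_xfs_fs())
 *
 * See xfs_super.c for the actual call sites.
 */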