linux/fs/xfs/xfs_log_cil.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog     *log)
{
        struct xlog_ticket *tic;

        tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        return tic;
}
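
/*
 * Sketch of that steal (condensed from xlog_cil_insert_items() below,
 * illustration only): on the first commit into a fresh context,
 *
 *      if (ctx->ticket->t_curr_res == 0) {
 *              ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
 *              tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
 *      }
 *
 * transfers the basic checkpoint overhead from the committing transaction,
 * which is why t_curr_res starts at zero here.
 */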

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
        struct xfs_cil_ctx      *ctx;

        ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
        INIT_WORK(&ctx->push_work, xlog_cil_push_work);
        return ctx;
}

static void
xlog_cil_ctx_switch(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        ctx->sequence = ++cil->xc_current_sequence;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog     *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
        uint    niovecs)
{
        return round_up((sizeof(struct xfs_log_vec) +
                                        niovecs * sizeof(struct xfs_log_iovec)),
                        sizeof(uint64_t));
}
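
/*
 * Worked example (illustration only): for a log item that needs two iovecs,
 *
 *      xlog_cil_iovec_space(2)
 *              = round_up(sizeof(struct xfs_log_vec) +
 *                         2 * sizeof(struct xfs_log_iovec), sizeof(uint64_t))
 *
 * covers the xfs_log_vec header, the iovec array that immediately follows it,
 * and any padding needed so the data region placed after it starts 64-bit
 * aligned.
 */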

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *      a) it does not exist; or
 *      b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start
                 * of the next one is naturally aligned.  We'll need to
                 * account for that slack space here. Then round nbytes up
                 * to 64-bit alignment so that the initial buffer alignment is
                 * easy to calculate and verify.
                 */
                nbytes += niovecs * sizeof(uint64_t);
                nbytes = round_up(nbytes, sizeof(uint64_t));

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes + xlog_cil_iovec_space(niovecs);

                /*
                 * if we have no shadow buffer, or it is too small, we need to
                 * reallocate it.
                 */
                if (!lip->li_lv_shadow ||
                    buf_size > lip->li_lv_shadow->lv_size) {

                        /*
                         * We free and allocate here as a realloc would copy
                         * unnecessary data. We don't use kmem_zalloc() for the
                         * same reason - we don't need to zero the data area in
                         * the buffer, only the log vector header and the iovec
                         * storage.
                         */
                        kmem_free(lip->li_lv_shadow);

                        /*
                         * We are in transaction context, which means this
                         * allocation will pick up GFP_NOFS from the
                         * memalloc_nofs_save/restore context the transaction
                         * holds. This means we can use GFP_KERNEL here so the
                         * generic kvmalloc() code will run vmalloc on
                         * contiguous page allocation failure as we require.
                         */
                        lv = kvmalloc(buf_size, GFP_KERNEL);
                        memset(lv, 0, xlog_cil_iovec_space(niovecs));

                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                        lip->li_lv_shadow = lv;
                } else {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv_shadow;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_next = NULL;
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
        }
}
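
/*
 * Worked example (hypothetical numbers, illustration only): an item whose
 * ->iop_size() returns niovecs = 2 and nbytes = 100 gets a shadow buffer
 * sized as
 *
 *      nbytes  += 2 * sizeof(uint64_t);        alignment slack -> 116
 *      nbytes   = round_up(116, 8);            -> 120
 *      buf_size = 120 + xlog_cil_iovec_space(2);
 *
 * The lv header and iovec array are zeroed by the memset() above; the data
 * region is left uninitialised because ->iop_format() overwrites all of it.
 */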

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
                *diff_len += lv->lv_bytes;
                *diff_iovecs += lv->lv_niovecs;
        }

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and make it the shadow
         * buffer for later freeing. In both cases we are now switching to the
         * shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                if (lv->lv_item->li_ops->iop_pin)
                        lv->lv_item->li_ops->iop_pin(lv->lv_item);
                lv->lv_item->li_lv_shadow = NULL;
        } else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                *diff_iovecs -= old_lv->lv_niovecs;
                lv->lv_item->li_lv_shadow = old_lv;
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
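
/*
 * Accounting sketch (hypothetical numbers, illustration only): relogging an
 * item that previously used 128 bytes in 2 iovecs with a new 256 byte,
 * 3 iovec version contributes only the delta to the CIL totals:
 *
 *      *diff_len += 256;  *diff_iovecs += 3;   new lv
 *      *diff_len -= 128;  *diff_iovecs -= 2;   old_lv being replaced
 *
 * so only the incremental space is stolen from the committing transaction's
 * reservation in xlog_cil_insert_items().
 */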
 304
 305/*
 306 * Format log item into a flat buffers
 307 *
 308 * For delayed logging, we need to hold a formatted buffer containing all the
 309 * changes on the log item. This enables us to relog the item in memory and
 310 * write it out asynchronously without needing to relock the object that was
 311 * modified at the time it gets written into the iclog.
 312 *
 313 * This function takes the prepared log vectors attached to each log item, and
 314 * formats the changes into the log vector buffer. The buffer it uses is
 315 * dependent on the current state of the vector in the CIL - the shadow lv is
 316 * guaranteed to be large enough for the current modification, but we will only
 317 * use that if we can't reuse the existing lv. If we can't reuse the existing
 318 * lv, then simple swap it out for the shadow lv. We don't free it - that is
 319 * done lazily either by th enext modification or the freeing of the log item.
 320 *
 321 * We don't set up region headers during this process; we simply copy the
 322 * regions into the flat buffer. We can do this because we still have to do a
 323 * formatting step to write the regions into the iclog buffer.  Writing the
 324 * ophdrs during the iclog write means that we can support splitting large
 325 * regions across iclog boundares without needing a change in the format of the
 326 * item/region encapsulation.
 327 *
 328 * Hence what we need to do now is change the rewrite the vector array to point
 329 * to the copied region inside the buffer we just allocated. This allows us to
 330 * format the regions into the iclog as though they are being formatted
 331 * directly out of the objects themselves.
 332 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        struct xfs_log_item     *lip;

        /* Bail out if we didn't find a log item.  */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv = NULL;
                struct xfs_log_vec *shadow;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * The formatting size information is already attached to
                 * the shadow lv on the log item.
                 */
                shadow = lip->li_lv_shadow;
                if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
                        ordered = true;

                /* Skip items that do not have any vectors for writing */
                if (!shadow->lv_niovecs && !ordered)
                        continue;

                /* compare to existing item size */
                old_lv = lip->li_lv;
                if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;
                        lv->lv_next = NULL;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_iovecs -= lv->lv_niovecs;
                        *diff_len -= lv->lv_bytes;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = shadow->lv_niovecs;

                        /* reset the lv buffer information for new formatting */
                        lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_buf = (char *)lv +
                                        xlog_cil_iovec_space(lv->lv_niovecs);
                } else {
                        /* switch to shadow buffer! */
                        lv = shadow;
                        lv->lv_item = lip;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                goto insert;
                        }
                }

                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
                lip->li_ops->iop_format(lip, lv);
insert:
                xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
        }
}
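
/*
 * Buffer choice sketch (condensed from the loop above, illustration only):
 *
 *      if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size)
 *              lv = lip->li_lv;        reuse in place, overwrite case
 *      else
 *              lv = shadow;            swap to the larger shadow buffer
 *
 * In the swap case the old lv becomes the new shadow in
 * xfs_cil_prepare_item(), so no buffer is freed on this path.
 */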

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item     *lip;
        int                     len = 0;
        int                     diff_iovecs = 0;
        int                     iclog_space;
        int                     iovhdr_res = 0, split_res = 0, ctx_res = 0;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

        spin_lock(&cil->xc_cil_lock);

        /* account for space used by new iovec headers  */
        iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
        len += iovhdr_res;
        ctx->nvecs += diff_iovecs;

        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &ctx->busy_extents);

        /*
         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
         * reservation has to grow as well as the current reservation as we
         * steal from tickets so we can correctly determine the space used
         * during the transaction commit.
         */
        if (ctx->ticket->t_curr_res == 0) {
                ctx_res = ctx->ticket->t_unit_res;
                ctx->ticket->t_curr_res = ctx_res;
                tp->t_ticket->t_curr_res -= ctx_res;
        }

        /* do we need space for more log record headers? */
        iclog_space = log->l_iclog_size - log->l_iclog_hsize;
        if (len > 0 && (ctx->space_used / iclog_space !=
                                (ctx->space_used + len) / iclog_space)) {
                split_res = (len + iclog_space - 1) / iclog_space;
                /* need to take into account split region headers, too */
                split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
                ctx->ticket->t_unit_res += split_res;
                ctx->ticket->t_curr_res += split_res;
                tp->t_ticket->t_curr_res -= split_res;
                ASSERT(tp->t_ticket->t_curr_res >= len);
        }
        tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;

        /*
         * If we've overrun the reservation, dump the tx details before we move
         * the log items. Shutdown is imminent...
         */
        if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
                xfs_warn(log->l_mp, "Transaction log reservation overrun:");
                xfs_warn(log->l_mp,
                         "  log items: %d bytes (iov hdrs: %d bytes)",
                         len, iovhdr_res);
                xfs_warn(log->l_mp, "  split region headers: %d bytes",
                         split_res);
                xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
                xlog_print_trans(tp);
        }

        /*
         * Now (re-)position everything modified at the tail of the CIL.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        list_for_each_entry(lip, &tp->t_items, li_trans) {

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * Only move the item if it isn't already at the tail. This is
                 * to prevent a transient list_empty() state when reinserting
                 * an item that is already the only item in the CIL.
                 */
                if (!list_is_last(&lip->li_cil, &cil->xc_cil))
                        list_move_tail(&lip->li_cil, &cil->xc_cil);
        }

        spin_unlock(&cil->xc_cil_lock);

        if (tp->t_ticket->t_curr_res < 0)
                xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
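
/*
 * Worked example of the split reservation (hypothetical sizes, illustration
 * only): with 32kB iclogs and a 512 byte header, iclog_space is 32256 bytes.
 * If the checkpoint had used 30000 bytes and this commit adds len = 5000,
 * the checkpoint now crosses an iclog boundary:
 *
 *      30000 / 32256 != (30000 + 5000) / 32256
 *      split_res = (5000 + 32256 - 1) / 32256 = 1
 *
 * so one extra iclog header plus one op header is stolen from the
 * transaction ticket to cover the record and region split.
 */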

static void
xlog_cil_free_logvec(
        struct xfs_log_vec      *log_vector)
{
        struct xfs_log_vec      *lv;

        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
                kmem_free(lv);
                lv = next;
        }
}

static void
xlog_discard_endio_work(
        struct work_struct      *work)
{
        struct xfs_cil_ctx      *ctx =
                container_of(work, struct xfs_cil_ctx, discard_endio_work);
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;

        xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
        kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock.  Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
        struct bio              *bio)
{
        struct xfs_cil_ctx      *ctx = bio->bi_private;

        INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
        queue_work(xfs_discard_wq, &ctx->discard_endio_work);
        bio_put(bio);
}

static void
xlog_discard_busy_extents(
        struct xfs_mount        *mp,
        struct xfs_cil_ctx      *ctx)
{
        struct list_head        *list = &ctx->busy_extents;
        struct xfs_extent_busy  *busyp;
        struct bio              *bio = NULL;
        struct blk_plug         plug;
        int                     error = 0;

        ASSERT(xfs_has_discard(mp));

        blk_start_plug(&plug);
        list_for_each_entry(busyp, list, list) {
                trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
                                         busyp->length);

                error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
                                XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
                                XFS_FSB_TO_BB(mp, busyp->length),
                                GFP_NOFS, 0, &bio);
                if (error && error != -EOPNOTSUPP) {
                        xfs_info(mp,
         "discard failed for extent [0x%llx,%u], error %d",
                                 (unsigned long long)busyp->bno,
                                 busyp->length,
                                 error);
                        break;
                }
        }

        if (bio) {
                bio->bi_private = ctx;
                bio->bi_end_io = xlog_discard_endio;
                submit_bio(bio);
        } else {
                xlog_discard_endio_work(&ctx->discard_endio_work);
        }
        blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        struct xfs_cil_ctx      *ctx)
{
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;
        bool                    abort = xlog_is_shutdown(ctx->cil->xc_log);

        /*
         * If the I/O failed, we're aborting the commit and already shutdown.
         * Wake any commit waiters before aborting the log items so we don't
         * block async log pushers on callbacks. Async log pushers explicitly do
         * not wait on log force completion because they may be holding locks
         * required to unpin items.
         */
        if (abort) {
                spin_lock(&ctx->cil->xc_push_lock);
                wake_up_all(&ctx->cil->xc_start_wait);
                wake_up_all(&ctx->cil->xc_commit_wait);
                spin_unlock(&ctx->cil->xc_push_lock);
        }

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents);
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                              xfs_has_discard(mp) && !abort);

        spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents))
                xlog_discard_busy_extents(mp, ctx);
        else
                kmem_free(ctx);
}

void
xlog_cil_process_committed(
        struct list_head        *list)
{
        struct xfs_cil_ctx      *ctx;

        while ((ctx = list_first_entry_or_null(list,
                        struct xfs_cil_ctx, iclog_entry))) {
                list_del(&ctx->iclog_entry);
                xlog_cil_committed(ctx);
        }
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
        struct xfs_cil_ctx      *ctx,
        struct xlog_in_core     *iclog)
{
        struct xfs_cil          *cil = ctx->cil;
        xfs_lsn_t               lsn = be64_to_cpu(iclog->ic_header.h_lsn);

        ASSERT(!ctx->commit_lsn);
        if (!ctx->start_lsn) {
                spin_lock(&cil->xc_push_lock);
                /*
                 * The LSN we need to pass to the log items on transaction
                 * commit is the LSN reported by the first log vector write, not
                 * the commit lsn. If we use the commit record lsn then we can
                 * move the tail beyond the grant write head.
                 */
                ctx->start_lsn = lsn;
                wake_up_all(&cil->xc_start_wait);
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        /*
         * Take a reference to the iclog for the context so that we still hold
         * it when xlog_write is done and has released it. This means the
         * context controls when the iclog is released for IO.
         */
        atomic_inc(&iclog->ic_refcnt);

        /*
         * xlog_state_get_iclog_space() guarantees there is enough space in the
         * iclog for an entire commit record, so we can attach the context
         * callbacks now.  This needs to be done before we make the commit_lsn
         * visible to waiters so that checkpoints with commit records in the
         * same iclog order their IO completion callbacks in the same order that
         * the commit records appear in the iclog.
         */
        spin_lock(&cil->xc_log->l_icloglock);
        list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
        spin_unlock(&cil->xc_log->l_icloglock);

        /*
         * Now we can record the commit LSN and wake anyone waiting for this
         * sequence to have the ordered commit record assigned to a physical
         * location in the log.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_iclog = iclog;
        ctx->commit_lsn = lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);
}

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
        _START_RECORD,
        _COMMIT_RECORD,
};

static int
xlog_cil_order_write(
        struct xfs_cil          *cil,
        xfs_csn_t               sequence,
        enum _record_type       record)
{
        struct xfs_cil_ctx      *ctx;

restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (xlog_is_shutdown(cil->xc_log)) {
                        spin_unlock(&cil->xc_push_lock);
                        return -EIO;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (ctx->sequence >= sequence)
                        continue;

                /* Wait until the LSN for the record has been recorded. */
                switch (record) {
                case _START_RECORD:
                        if (!ctx->start_lsn) {
                                xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                case _COMMIT_RECORD:
                        if (!ctx->commit_lsn) {
                                xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                }
        }
        spin_unlock(&cil->xc_push_lock);
        return 0;
}
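
/*
 * Usage sketch (mirrors the two callers below, illustration only): a push of
 * checkpoint sequence N orders both of its records against older checkpoints:
 *
 *      xlog_cil_order_write(cil, N, _START_RECORD);    then xlog_write()
 *      xlog_cil_order_write(cil, N, _COMMIT_RECORD);   then xlog_write()
 *
 * Each call blocks until every committing context with sequence < N has
 * recorded the corresponding LSN, so start and commit records always reach
 * the log in ascending sequence order.
 */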

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
        struct xfs_cil_ctx      *ctx,
        struct xfs_log_vec      *chain)
{
        struct xlog             *log = ctx->cil->xc_log;
        int                     error;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
        if (error)
                return error;
        return xlog_write(log, ctx, chain, ctx->ticket, XLOG_START_TRANS);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
        struct xfs_cil_ctx      *ctx)
{
        struct xlog             *log = ctx->cil->xc_log;
        struct xfs_log_iovec    reg = {
                .i_addr = NULL,
                .i_len = 0,
                .i_type = XLOG_REG_TYPE_COMMIT,
        };
        struct xfs_log_vec      vec = {
                .lv_niovecs = 1,
                .lv_iovecp = &reg,
        };
        int                     error;

        if (xlog_is_shutdown(log))
                return -EIO;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
        if (error)
                return error;

        error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS);
        if (error)
                xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
        return error;
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice.  If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        struct xfs_cil_ctx      *ctx =
                container_of(work, struct xfs_cil_ctx, push_work);
        struct xfs_cil          *cil = ctx->cil;
        struct xlog             *log = cil->xc_log;
        struct xfs_log_vec      *lv;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_ticket      *tic;
        int                     num_iovecs;
        int                     error = 0;
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
        xfs_lsn_t               preflush_tail_lsn;
        xfs_csn_t               push_seq;
        struct bio              bio;
        DECLARE_COMPLETION_ONSTACK(bdev_flush);
        bool                    push_commit_stable;

        new_ctx = xlog_cil_ctx_alloc();
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);
        push_commit_stable = cil->xc_push_commit_stable;
        cil->xc_push_commit_stable = false;

        /*
         * As we are about to switch to a new, empty CIL context, we no longer
         * need to throttle tasks on CIL space overruns. Wake any waiters that
         * the hard push throttle may have caught so they can start committing
         * to the new context. The ctx->xc_push_lock provides the serialisation
         * necessary for safely using the lockless waitqueue_active() check in
         * this context.
         */
        if (waitqueue_active(&cil->xc_push_wait))
                wake_up_all(&cil->xc_push_wait);

        /*
         * Check if we have anything to push. If there is nothing, then we
         * don't move on to a new sequence number and so we have to be able to
         * push this sequence again later.
         */
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /* check for a previously pushed sequence */
        if (push_seq < ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /*
         * We are now going to push this context, so add it to the committing
         * list before we do anything else. This ensures that anyone waiting on
         * this push can easily detect the difference between a "push in
         * progress" and "CIL is empty, nothing to do".
         *
         * IOWs, a wait loop can now check for:
         *      the current sequence not being found on the committing list;
         *      an empty CIL; and
         *      an unchanged sequence number
         * to detect a push that had nothing to do and therefore does not need
         * waiting on. If the CIL is not empty, we get put on the committing
         * list before emptying the CIL and bumping the sequence number. Hence
         * an empty CIL and an unchanged sequence number means we jumped out
         * above after doing nothing.
         *
         * Hence the waiter will either find the commit sequence on the
         * committing list or the sequence number will be unchanged and the CIL
         * still dirty. In that latter case, the push has not yet started, and
         * so the waiter will have to continue trying to check the CIL
         * committing list until it is found. In extreme cases of delay, the
         * sequence may fully commit between the waiter's attempts to check the
         * committing list.
         */
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);

        /*
         * The CIL is stable at this point - nothing new will be added to it
         * because we hold the flush lock exclusively. Hence we can now issue
         * a cache flush to ensure all the completed metadata in the journal we
         * are about to overwrite is on stable storage.
         *
         * Because we are issuing this cache flush before we've written the
         * tail lsn to the iclog, we can have metadata IO completions move the
         * tail forwards between the completion of this flush and the iclog
         * being written. In this case, we need to re-issue the cache flush
         * before the iclog write. To detect whether the log tail moves, sample
         * the tail LSN *before* we issue the flush.
         */
        preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
        xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
                                &bdev_flush);

        /*
         * Pull all the log vectors off the items in the CIL, and remove the
         * items from the CIL. We don't need the CIL lock here because it's only
         * needed on the transaction commit side which is currently locked out
         * by the flush lock.
         */
        lv = NULL;
        num_iovecs = 0;
        while (!list_empty(&cil->xc_cil)) {
                struct xfs_log_item     *item;

                item = list_first_entry(&cil->xc_cil,
                                        struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                if (!ctx->lv_chain)
                        ctx->lv_chain = item->li_lv;
                else
                        lv->lv_next = item->li_lv;
                lv = item->li_lv;
                item->li_lv = NULL;
                num_iovecs += lv->lv_niovecs;
        }

        /*
         * Switch the contexts so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI.  Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_seq requires us to mirror the new sequence into the cil
         * structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        xlog_cil_ctx_switch(cil, new_ctx);
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);

        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         *
         * The LSN we need to pass to the log items on transaction commit is
         * the LSN reported by the first log vector write. If we use the commit
         * record lsn then we can move the tail beyond the grant write head.
         */
        tic = ctx->ticket;
        thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        thdr.th_type = XFS_TRANS_CHECKPOINT;
        thdr.th_tid = tic->t_tid;
        thdr.th_num_items = num_iovecs;
        lhdr.i_addr = &thdr;
        lhdr.i_len = sizeof(xfs_trans_header_t);
        lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
        tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

        lvhdr.lv_niovecs = 1;
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;

        /*
         * Before we format and submit the first iclog, we have to ensure that
         * the metadata writeback ordering cache flush is complete.
         */
        wait_for_completion(&bdev_flush);

        error = xlog_cil_write_chain(ctx, &lvhdr);
        if (error)
                goto out_abort_free_ticket;

        error = xlog_cil_write_commit_record(ctx);
        if (error)
                goto out_abort_free_ticket;

        xfs_log_ticket_ungrant(log, tic);

        /*
         * If the checkpoint spans multiple iclogs, wait for all previous iclogs
         * to complete before we submit the commit_iclog. We can't use state
         * checks for this - ACTIVE can be either a past completed iclog or a
         * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
         * past or future iclog awaiting IO or ordered IO completion to be run.
         * In the latter case, if it's a future iclog and we wait on it, then we
         * will hang because it won't get processed through to ic_force_wait
         * wakeup until this commit_iclog is written to disk.  Hence we use the
         * iclog header lsn and compare it to the commit lsn to determine if we
         * need to wait on iclogs or not.
         */
        spin_lock(&log->l_icloglock);
        if (ctx->start_lsn != ctx->commit_lsn) {
                xfs_lsn_t       plsn;

                plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
                if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
                        /*
                         * Waiting on ic_force_wait orders the completion of
                         * iclogs older than ic_prev. Hence we only need to wait
                         * on the most recent older iclog here.
                         */
                        xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
                        spin_lock(&log->l_icloglock);
                }

                /*
                 * We need to issue a pre-flush so that the ordering for this
                 * checkpoint is correctly preserved down to stable storage.
                 */
                ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
        }

        /*
         * The commit iclog must be written to stable storage to guarantee
         * journal IO vs metadata writeback IO is correctly ordered on stable
         * storage.
         *
         * If the push caller needs the commit to be immediately stable and the
         * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
         * will be written when released, switch its state to WANT_SYNC right
         * now.
         */
        ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
        if (push_commit_stable &&
            ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
                xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
        xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn);

        /* Not safe to reference ctx now! */

        spin_unlock(&log->l_icloglock);
        return;

out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kmem_free(new_ctx);
        return;

out_abort_free_ticket:
        xfs_log_ticket_ungrant(log, tic);
        ASSERT(xlog_is_shutdown(log));
        if (!ctx->commit_iclog) {
                xlog_cil_committed(ctx);
                return;
        }
        spin_lock(&log->l_icloglock);
        xlog_state_release_iclog(log, ctx->commit_iclog, 0);
        /* Not safe to reference ctx now! */
        spin_unlock(&log->l_icloglock);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log) __releases(cil->xc_ctx_lock)
{
        struct xfs_cil  *cil = log->l_cilp;

        /*
         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there.
         */
        ASSERT(!list_empty(&cil->xc_cil));

        /*
         * Don't do a background push if we haven't used up all the
         * space available yet.
         */
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
                up_read(&cil->xc_ctx_lock);
                return;
        }

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        }

        /*
         * Drop the context lock now, we can't hold that if we need to sleep
         * because we are over the blocking threshold. The push_lock is still
         * held, so blocking threshold sleep/wakeup is still correctly
         * serialised here.
         */
        up_read(&cil->xc_ctx_lock);

        /*
         * If we are well over the space limit, throttle the work that is being
         * done until the push work on this context has begun. Enforce the hard
         * throttle on all transaction commits once it has been activated, even
         * if the committing transactions have resulted in the space usage
         * dipping back down under the hard limit.
         *
         * The ctx->xc_push_lock provides the serialisation necessary for safely
         * using the lockless waitqueue_active() check in this context.
         */
        if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
            waitqueue_active(&cil->xc_push_wait)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(cil->xc_ctx->space_used < log->l_logsize);
                xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }

        spin_unlock(&cil->xc_push_lock);
}
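
/*
 * Throttle sketch (condensed from the function above, illustration only):
 *
 *      if (space_used < XLOG_CIL_SPACE_LIMIT(log))
 *              return;                         nothing to do yet
 *      queue_work(...);                        background push
 *      if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
 *              xlog_wait(...);                 hard throttle committers
 *
 * so commits first trigger asynchronous push work and only start blocking
 * once the CIL is well over the background push limit.
 */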

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq,
        bool            async)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        if (!async)
                flush_workqueue(cil->xc_push_wq);

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
         */
        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        cil->xc_push_commit_stable = async;
        queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}
1251
1252/*
1253 * Commit a transaction with the given vector to the Committed Item List.
1254 *
1255 * To do this, we need to format the item, pin it in memory if required and
1256 * account for the space used by the transaction. Once we have done that we
1257 * need to release the unused reservation for the transaction, attach the
1258 * transaction to the checkpoint context so we carry the busy extents through
1259 * to checkpoint completion, and then unlock all the items in the transaction.
1260 *
1261 * Called with the context lock already held in read mode to lock out
1262 * background commit, returns without it held once background commits are
1263 * allowed again.
1264 */
1265void
1266xlog_cil_commit(
1267        struct xlog             *log,
1268        struct xfs_trans        *tp,
1269        xfs_csn_t               *commit_seq,
1270        bool                    regrant)
1271{
1272        struct xfs_cil          *cil = log->l_cilp;
1273        struct xfs_log_item     *lip, *next;
1274
1275        /*
1276         * Do all necessary memory allocation before we lock the CIL.
1277         * This ensures the allocation does not deadlock with a CIL
1278         * push in memory reclaim (e.g. from kswapd).
1279         */
1280        xlog_cil_alloc_shadow_bufs(log, tp);
1281
1282        /* lock out background commit */
1283        down_read(&cil->xc_ctx_lock);
1284
1285        xlog_cil_insert_items(log, tp);
1286
1287        if (regrant && !xlog_is_shutdown(log))
1288                xfs_log_ticket_regrant(log, tp->t_ticket);
1289        else
1290                xfs_log_ticket_ungrant(log, tp->t_ticket);
1291        tp->t_ticket = NULL;
1292        xfs_trans_unreserve_and_mod_sb(tp);
1293
1294        /*
1295         * Once all the items of the transaction have been copied to the CIL,
1296         * the items can be unlocked and possibly freed.
1297         *
1298         * This needs to be done before we drop the CIL context lock because we
1299         * have to update state in the log items and unlock them before they go
1300         * to disk. If we don't, then the CIL checkpoint can race with us and
1301         * we can run checkpoint completion before we've updated and unlocked
1302         * the log items. This affects (at least) processing of stale buffers,
1303         * inodes and EFIs.
1304         */
1305        trace_xfs_trans_commit_items(tp, _RET_IP_);
1306        list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1307                xfs_trans_del_item(lip);
1308                if (lip->li_ops->iop_committing)
1309                        lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1310        }
1311        if (commit_seq)
1312                *commit_seq = cil->xc_ctx->sequence;
1313
1314        /* xlog_cil_push_background() releases cil->xc_ctx_lock */
1315        xlog_cil_push_background(log);
1316}
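
/*
 * Illustrative sketch (not part of this file): the shape of a transaction
 * commit path built on xlog_cil_commit(). Dirty-item checks, error handling
 * and the final transaction free are elided; example_trans_commit() is a
 * hypothetical stand-in for the real caller in xfs_trans.c.
 */
static void
example_trans_commit(
        struct xfs_trans        *tp,
        bool                    regrant)
{
        struct xlog     *log = tp->t_mountp->m_log;
        xfs_csn_t       commit_seq = 0;

        /*
         * Hand the ticket, busy extents and dirty items to the CIL; on
         * return the items are unlocked and tp->t_ticket is NULL, so the
         * transaction structure can be torn down by the caller.
         */
        xlog_cil_commit(log, tp, &commit_seq, regrant);

        /*
         * A synchronous transaction would now force the checkpoint it
         * committed to, e.g. via xlog_cil_force_seq(log, commit_seq).
         */
}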
1317
1318/*
1319 * Flush the CIL to stable storage but don't wait for it to complete. This
1320 * requires the CIL push to ensure the commit record for the push hits the disk,
1321 * but otherwise is no different to a push done from a log force.
1322 */
1323void
1324xlog_cil_flush(
1325        struct xlog     *log)
1326{
1327        xfs_csn_t       seq = log->l_cilp->xc_current_sequence;
1328
1329        trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
1330        xlog_cil_push_now(log, seq, true);
1331}
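
/*
 * Illustrative usage (not part of this file): a push worker that must not
 * sleep can use xlog_cil_flush() to get the current checkpoint moving and
 * retry its own work later, rather than issuing a blocking log force. The
 * helper and its context are hypothetical.
 */
static void
example_nonblocking_push(
        struct xlog     *log)
{
        if (xlog_cil_empty(log))
                return;         /* nothing to checkpoint */

        /* Starts the push and commit record write, returns immediately. */
        xlog_cil_flush(log);
}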
1332
1333/*
1334 * Conditionally push the CIL based on the sequence passed in.
1335 *
1336 * We only need to push if we haven't already pushed the sequence number given.
1337 * Hence the only time we will trigger a push here is if the push sequence is
1338 * the same as the current context.
1339 *
1340 * We return the current commit lsn to allow the callers to determine if an
1341 * iclog flush is necessary following this call.
1342 */
1343xfs_lsn_t
1344xlog_cil_force_seq(
1345        struct xlog     *log,
1346        xfs_csn_t       sequence)
1347{
1348        struct xfs_cil          *cil = log->l_cilp;
1349        struct xfs_cil_ctx      *ctx;
1350        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;
1351
1352        ASSERT(sequence <= cil->xc_current_sequence);
1353
1354        if (!sequence)
1355                sequence = cil->xc_current_sequence;
1356        trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
1357
1358        /*
1359         * check to see if we need to force out the current context.
1360         * xlog_cil_push_now() handles racing pushes for the same sequence,
1361         * so no need to deal with it here.
1362         */
1363restart:
1364        xlog_cil_push_now(log, sequence, false);
1365
1366        /*
1367         * See if we can find a previous sequence still committing.
1368         * We need to wait for all previous sequence commits to complete
1369         * before allowing the force of push_seq to go ahead. Hence block
1370         * on commits for those as well.
1371         */
1372        spin_lock(&cil->xc_push_lock);
1373        list_for_each_entry(ctx, &cil->xc_committing, committing) {
1374                /*
1375                 * Avoid getting stuck in this loop because we were woken by the
1376                 * shutdown, but then went back to sleep once already in the
1377                 * shutdown state.
1378                 */
1379                if (xlog_is_shutdown(log))
1380                        goto out_shutdown;
1381                if (ctx->sequence > sequence)
1382                        continue;
1383                if (!ctx->commit_lsn) {
1384                        /*
1385                         * It is still being pushed! Wait for the push to
1386                         * complete, then start again from the beginning.
1387                         */
1388                        XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
1389                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1390                        goto restart;
1391                }
1392                if (ctx->sequence != sequence)
1393                        continue;
1394                /* found it! */
1395                commit_lsn = ctx->commit_lsn;
1396        }
1397
1398        /*
1399         * The call to xlog_cil_push_now() executes the push in the background.
1400         * Hence by the time we get here, our sequence may not have been
1401         * pushed yet. This is true if the current sequence still matches the
1402         * push sequence after the above wait loop and the CIL still contains
1403         * dirty objects. This is guaranteed by the push code first adding the
1404         * context to the committing list before emptying the CIL.
1405         *
1406         * Hence if we don't find the context in the committing list and the
1407         * current sequence number is unchanged then the CIL contents are
1408         * significant.  If the CIL is empty, it means there was nothing to push
1409         * and that means there is nothing to wait for. If the CIL is not empty,
1410         * it means we haven't yet started the push, because if it had started
1411         * we would have found the context on the committing list.
1412         */
1413        if (sequence == cil->xc_current_sequence &&
1414            !list_empty(&cil->xc_cil)) {
1415                spin_unlock(&cil->xc_push_lock);
1416                goto restart;
1417        }
1418
1419        spin_unlock(&cil->xc_push_lock);
1420        return commit_lsn;
1421
1422        /*
1423         * We detected a shutdown in progress. We need to trigger the log force
1424         * to pass through its iclog state machine error handling, even though
1425         * we are already in a shutdown state. Hence we can't return
1426         * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1427         * LSN is already stable), so we return a zero LSN instead.
1428         */
1429out_shutdown:
1430        spin_unlock(&cil->xc_push_lock);
1431        return 0;
1432}
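
/*
 * Illustrative sketch (not part of this file): consuming the return value of
 * xlog_cil_force_seq(). NULLCOMMITLSN means the commit record for the
 * sequence is already on stable storage, while a zero LSN (shutdown) must
 * still be fed to the iclog force so its error handling runs. The caller
 * name is hypothetical; the real consumer lives in xfs_log.c.
 */
static void
example_force_seq_caller(
        struct xlog     *log,
        xfs_csn_t       seq)
{
        xfs_lsn_t       commit_lsn;

        commit_lsn = xlog_cil_force_seq(log, seq);
        if (commit_lsn == NULLCOMMITLSN)
                return;         /* already stable, nothing further to flush */

        /*
         * ... force and wait on the iclog containing commit_lsn here; the
         * zero LSN shutdown case deliberately falls through to this step ...
         */
}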
1433
1434/*
1435 * Check if the current log item was first committed in this sequence.
1436 * We can't rely on just the log item being in the CIL; we have to check
1437 * the recorded commit sequence number.
1438 *
1439 * Note: for this to be used in a non-racy manner, it has to be called with
1440 * CIL flushing locked out. As a result, it should only be used during the
1441 * transaction commit process when deciding what to format into the item.
1442 */
1443bool
1444xfs_log_item_in_current_chkpt(
1445        struct xfs_log_item *lip)
1446{
1447        struct xfs_cil_ctx *ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
1448
1449        if (list_empty(&lip->li_cil))
1450                return false;
1451
1452        /*
1453         * li_seq is written on the first commit of a log item to record the
1454         * first checkpoint it is written to. Hence if it is different to the
1455         * current sequence, we're in a new checkpoint.
1456         */
1457        return lip->li_seq == ctx->sequence;
1458}
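
/*
 * Illustrative usage (not part of this file): deferred-work relogging can
 * skip an intent item whose first commit already lies in the current
 * checkpoint, as relogging it would not help move the log tail forwards.
 * The helper name is hypothetical.
 */
static bool
example_intent_needs_relog(
        struct xfs_log_item     *lip)
{
        /* First committed in the open checkpoint - nothing to gain. */
        if (xfs_log_item_in_current_chkpt(lip))
                return false;

        return true;
}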
1459
1460/*
1461 * Perform initial CIL structure initialisation.
1462 */
1463int
1464xlog_cil_init(
1465        struct xlog     *log)
1466{
1467        struct xfs_cil  *cil;
1468        struct xfs_cil_ctx *ctx;
1469
1470        cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
1471        if (!cil)
1472                return -ENOMEM;
1473        /*
1474         * Limit the CIL pipeline depth to 4 concurrent work items to bound
1475         * the concurrency the log spinlocks will be exposed to.
1476         */
1477        cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
1478                        XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
1479                        4, log->l_mp->m_super->s_id);
1480        if (!cil->xc_push_wq)
1481                goto out_destroy_cil;
1482
1483        INIT_LIST_HEAD(&cil->xc_cil);
1484        INIT_LIST_HEAD(&cil->xc_committing);
1485        spin_lock_init(&cil->xc_cil_lock);
1486        spin_lock_init(&cil->xc_push_lock);
1487        init_waitqueue_head(&cil->xc_push_wait);
1488        init_rwsem(&cil->xc_ctx_lock);
1489        init_waitqueue_head(&cil->xc_start_wait);
1490        init_waitqueue_head(&cil->xc_commit_wait);
1491        cil->xc_log = log;
1492        log->l_cilp = cil;
1493
1494        ctx = xlog_cil_ctx_alloc();
1495        xlog_cil_ctx_switch(cil, ctx);
1496
1497        return 0;
1498
1499out_destroy_cil:
1500        kmem_free(cil);
1501        return -ENOMEM;
1502}
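
/*
 * Illustrative sketch (not part of this file): the CIL bring-up order at
 * mount time. The recovery steps are elided and the function name is
 * hypothetical; the real sequencing is driven from the log mount code.
 */
static int
example_cil_bringup(
        struct xlog     *log)
{
        int     error;

        /* Allocate the CIL, its workqueue and the first (empty) context. */
        error = xlog_cil_init(log);
        if (error)
                return error;

        /*
         * ... the first stage of log recovery runs here so the log head and
         * tail are known, then xlog_cil_init_post_recovery() gives the
         * first context its log ticket ...
         */
        return 0;
}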
1503
1504void
1505xlog_cil_destroy(
1506        struct xlog     *log)
1507{
1508        if (log->l_cilp->xc_ctx) {
1509                if (log->l_cilp->xc_ctx->ticket)
1510                        xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
1511                kmem_free(log->l_cilp->xc_ctx);
1512        }
1513
1514        ASSERT(list_empty(&log->l_cilp->xc_cil));
1515        destroy_workqueue(log->l_cilp->xc_push_wq);
1516        kmem_free(log->l_cilp);
1517}
1518