linux/fs/xfs/xfs_log_cil.c
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog     *log)
{
        struct xlog_ticket *tic;

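        /*
         * KM_SLEEP means this allocation is not allowed to fail, and
         * KM_NOFS stops memory reclaim from recursing back into the
         * filesystem while we hold log state.
         */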
        tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
                                KM_SLEEP|KM_NOFS);
        tic->t_trans_type = XFS_TRANS_CHECKPOINT;

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog     *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
                *diff_len += lv->lv_bytes;
                *diff_iovecs += lv->lv_niovecs;
        }

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and free it.
         */
        if (!old_lv)
                lv->lv_item->li_ops->iop_pin(lv->lv_item);
        else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                *diff_iovecs -= old_lv->lv_niovecs;
                kmem_free(old_lv);
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log items into flat buffers.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item, and the item is inserted into
 * the Committed Item List for tracking until the next checkpoint is written
 * out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied regions inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        struct xfs_log_item_desc *lidp;

        /* Bail out if we didn't find a log item.  */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /* Skip items that do not have any vectors for writing */
                if (!niovecs)
                        continue;

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start
                 * of the next one is naturally aligned.  We'll need to
                 * account for that slack space here. Then round nbytes up
                 * to 64-bit alignment so that the initial buffer alignment is
                 * easy to calculate and verify.
                 */
                nbytes += niovecs * sizeof(uint64_t);
                nbytes = round_up(nbytes, sizeof(uint64_t));
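                /*
                 * For example (illustrative numbers only): niovecs = 2 and
                 * nbytes = 100 gives 100 + 2 * 8 = 116 bytes of data plus
                 * slack, rounded up to 120 bytes so the buffer ends 64-bit
                 * aligned.
                 */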

                /* grab the old item if it exists for reservation accounting */
                old_lv = lip->li_lv;

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes +
                           round_up((sizeof(struct xfs_log_vec) +
                                     niovecs * sizeof(struct xfs_log_iovec)),
                                    sizeof(uint64_t));
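
                /*
                 * The resulting single allocation is laid out as:
                 *
                 *   [ xfs_log_vec | iovec array | (padding) | data buffer ]
                 *   ^lv            ^lv_iovecp               ^lv_buf
                 *
                 * with the data buffer placed at the 64-bit aligned tail of
                 * the allocation (see the lv_buf setup below).
                 */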

                /* compare to existing item size */
                if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;
                        lv->lv_next = NULL;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_iovecs -= lv->lv_niovecs;
                        *diff_len -= lv->lv_bytes;
                } else {
                        /* allocate new data chunk */
                        lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                                goto insert;
                        }
                        lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf_len = 0;
                lv->lv_bytes = 0;
                lv->lv_buf = (char *)lv + buf_size - nbytes;
                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));

                lip->li_ops->iop_format(lip, lv);
insert:
                ASSERT(lv->lv_buf_len <= nbytes);
                xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
        }
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item_desc *lidp;
        int                     len = 0;
        int                     diff_iovecs = 0;
        int                     iclog_space;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

        /*
         * Now (re-)position everything modified at the tail of the CIL.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        spin_lock(&cil->xc_cil_lock);
        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item     *lip = lidp->lid_item;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;

                list_move_tail(&lip->li_cil, &cil->xc_cil);
        }

        /* account for space used by new iovec headers  */
        len += diff_iovecs * sizeof(xlog_op_header_t);
        ctx->nvecs += diff_iovecs;

        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &ctx->busy_extents);

        /*
         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
         * reservation has to grow as well as the current reservation as we
         * steal from tickets so we can correctly determine the space used
         * during the transaction commit.
         */
        if (ctx->ticket->t_curr_res == 0) {
                ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
                tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
        }

        /* do we need space for more log record headers? */
        iclog_space = log->l_iclog_size - log->l_iclog_hsize;
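        /*
         * Illustrative example (made-up numbers): with iclog_space = 32k,
         * space_used = 30k and len = 4k, the old and new totals land in
         * different iclogs (30k/32k = 0, 34k/32k = 1), so the checkpoint
         * grows across a log record boundary and we must also steal space
         * for the extra record header and split region op header.
         */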
        if (len > 0 && (ctx->space_used / iclog_space !=
                                (ctx->space_used + len) / iclog_space)) {
                int hdrs;

                hdrs = (len + iclog_space - 1) / iclog_space;
                /* need to take into account split region headers, too */
                hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
                ctx->ticket->t_unit_res += hdrs;
                ctx->ticket->t_curr_res += hdrs;
                tp->t_ticket->t_curr_res -= hdrs;
                ASSERT(tp->t_ticket->t_curr_res >= len);
        }
        tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;

        spin_unlock(&cil->xc_cil_lock);
}

static void
xlog_cil_free_logvec(
        struct xfs_log_vec      *log_vector)
{
        struct xfs_log_vec      *lv;

        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
                kmem_free(lv);
                lv = next;
        }
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        void    *args,
        int     abort)
{
        struct xfs_cil_ctx      *ctx = args;
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents);
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                             (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

        /*
         * If we are aborting the commit, wake up anyone waiting on the
         * committing list.  If we don't, then a shutdown can leave processes
         * waiting in xlog_cil_force_lsn() on a sequence commit that will
         * never happen because we aborted it.
         */
        spin_lock(&ctx->cil->xc_push_lock);
        if (abort)
                wake_up_all(&ctx->cil->xc_commit_wait);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents)) {
                ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

                xfs_discard_extents(mp, &ctx->busy_extents);
                xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
        }

        kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it is a
 * background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If
 * we get a race between multiple pushes for the same sequence they will block
 * on the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
        struct xlog             *log)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
        struct xfs_cil_ctx      *ctx;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_in_core     *commit_iclog;
        struct xlog_ticket      *tic;
        int                     num_iovecs;
        int                     error = 0;
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
        xfs_lsn_t               commit_lsn;
        xfs_lsn_t               push_seq;

        if (!cil)
                return 0;

        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);
        ctx = cil->xc_ctx;

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);

        /*
         * Check if we have anything to push. If there is nothing, then we
         * don't move on to a new sequence number and so we have to be able to
         * push this sequence again later.
         */
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }
        spin_unlock(&cil->xc_push_lock);

        /* check for a previously pushed sequence */
        if (push_seq < cil->xc_ctx->sequence)
                goto out_skip;

        /*
         * pull all the log vectors off the items in the CIL, and
         * remove the items from the CIL. We don't need the CIL lock
         * here because it's only needed on the transaction commit
         * side which is currently locked out by the flush lock.
         */
        lv = NULL;
        num_iovecs = 0;
        while (!list_empty(&cil->xc_cil)) {
                struct xfs_log_item     *item;

                item = list_first_entry(&cil->xc_cil,
                                        struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                if (!ctx->lv_chain)
                        ctx->lv_chain = item->li_lv;
                else
                        lv->lv_next = item->li_lv;
                lv = item->li_lv;
                item->li_lv = NULL;
                num_iovecs += lv->lv_niovecs;
        }
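
        /*
         * At this point the context owns the entire chain of formatted
         * vectors, e.g.:
         *
         *      ctx->lv_chain -> lv(item A) -> lv(item B) -> ... -> NULL
         *
         * and every item has been unlinked from the CIL with li_lv cleared.
         */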

        /*
         * initialise the new context and attach it to the CIL. Then attach
         * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;

        /*
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI.  Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_lsn requires us to mirror the new sequence into the
         * cil structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        cil->xc_current_sequence = new_ctx->sequence;
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);

        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         *
         * The LSN we need to pass to the log items on transaction commit is
         * the LSN reported by the first log vector write. If we use the commit
         * record lsn then we can move the tail beyond the grant write head.
         */
        tic = ctx->ticket;
        thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        thdr.th_type = XFS_TRANS_CHECKPOINT;
        thdr.th_tid = tic->t_tid;
        thdr.th_num_items = num_iovecs;
        lhdr.i_addr = &thdr;
        lhdr.i_len = sizeof(xfs_trans_header_t);
        lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
        tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

        lvhdr.lv_niovecs = 1;
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;

        error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
        if (error)
                goto out_abort_free_ticket;

        /*
         * now that we've written the checkpoint into the log, strictly
         * order the commit records so replay will get them in the right order.
         */
restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log)) {
                        spin_unlock(&cil->xc_push_lock);
                        goto out_abort_free_ticket;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (new_ctx->sequence >= ctx->sequence)
                        continue;
                if (!new_ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
        }
        spin_unlock(&cil->xc_push_lock);

        /* xfs_log_done always frees the ticket on error. */
        commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
        if (commit_lsn == -1)
                goto out_abort;

        /* attach all the transactions w/ busy extents to iclog */
        ctx->log_cb.cb_func = xlog_cil_committed;
        ctx->log_cb.cb_arg = ctx;
        error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
        if (error)
                goto out_abort;

        /*
         * now the checkpoint commit is complete and we've attached the
         * callbacks to the iclog we can assign the commit LSN to the context
         * and wake up anyone who is waiting for the commit to complete.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_lsn = commit_lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);

        /* release the hounds! */
        return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kmem_free(new_ctx);
        return 0;

out_abort_free_ticket:
        xfs_log_ticket_put(tic);
out_abort:
        xlog_cil_committed(ctx, XFS_LI_ABORTED);
        return -EIO;
}

static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        struct xfs_cil          *cil = container_of(work, struct xfs_cil,
                                                        xc_push_work);
        xlog_cil_push(cil->xc_log);
}

/*
 * We need to push the CIL every so often so we don't cache more than we can
 * fit in the log. The limit really is that a checkpoint can't be more than
 * half the log (the current checkpoint is not allowed to overwrite the
 * previous checkpoint), but commit latency and memory usage limit this to a
 * smaller size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;

        /*
         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there
         */
        ASSERT(!list_empty(&cil->xc_cil));

        /*
         * don't do a background push if we haven't used up all the
         * space available yet.
         */
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
                return;

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        }
        spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        flush_work(&cil->xc_push_work);

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
         */
        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_lsn_t               *commit_lsn,
        int                     flags)
{
        struct xlog             *log = mp->m_log;
        struct xfs_cil          *cil = log->l_cilp;
        int                     log_flags = 0;

        if (flags & XFS_TRANS_RELEASE_LOG_RES)
                log_flags = XFS_LOG_REL_PERM_RESERV;

        /* lock out background commit */
        down_read(&cil->xc_ctx_lock);

        xlog_cil_insert_items(log, tp);

        /* check we didn't blow the reservation */
        if (tp->t_ticket->t_curr_res < 0)
                xlog_print_tic_res(mp, tp->t_ticket);

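        /*
         * Note that with delayed logging the "commit LSN" recorded here is
         * really the CIL sequence number, not a disk LSN; xlog_cil_force_lsn()
         * translates a sequence number back into a real commit record LSN
         * when a log force is required.
         */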
        tp->t_commit_lsn = cil->xc_ctx->sequence;
        if (commit_lsn)
                *commit_lsn = tp->t_commit_lsn;

        xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * Once all the items of the transaction have been copied to the CIL,
         * the items can be unlocked and freed.
         *
         * This needs to be done before we drop the CIL context lock because we
         * have to update state in the log items and unlock them before they go
         * to disk. If we don't, then the CIL checkpoint can race with us and
         * we can run checkpoint completion before we've updated and unlocked
         * the log items. This affects (at least) processing of stale buffers,
         * inodes and EFIs.
         */
        xfs_trans_free_items(tp, tp->t_commit_lsn, 0);

        xlog_cil_push_background(log);

        up_read(&cil->xc_ctx_lock);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number
 * given. Hence the only time we will trigger a push here is if the push
 * sequence is the same as the current context's sequence.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
        struct xlog     *log,
        xfs_lsn_t       sequence)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;

        ASSERT(sequence <= cil->xc_current_sequence);

        /*
         * check to see if we need to force out the current context.
         * xlog_cil_push() handles racing pushes for the same sequence,
         * so no need to deal with it here.
         */
restart:
        xlog_cil_push_now(log, sequence);

        /*
         * See if we can find a previous sequence still committing.
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto out_shutdown;
                if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
                if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
        }

        /*
         * The call to xlog_cil_push_now() executes the push in the background.
         * Hence by the time we get here our sequence may not have been pushed
         * yet. This is true if the current sequence still matches the push
         * sequence after the above wait loop and the CIL still contains dirty
         * objects.
         *
         * When the push occurs, it will empty the CIL and atomically increment
         * the current sequence past the push sequence and move it into the
         * committing list. Of course, if the CIL is clean at the time of the
         * push, it won't have pushed the CIL at all, so in that case we should
         * try the push for this sequence again from the start just in case.
         */
        if (sequence == cil->xc_current_sequence &&
            !list_empty(&cil->xc_cil)) {
                spin_unlock(&cil->xc_push_lock);
                goto restart;
        }

        spin_unlock(&cil->xc_push_lock);
        return commit_lsn;

        /*
         * We detected a shutdown in progress. We need to trigger the log force
         * to pass through its iclog state machine error handling, even though
         * we are already in a shutdown state. Hence we can't return
         * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
         * LSN is already stable), so we return a zero LSN instead.
         */
out_shutdown:
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
        struct xfs_log_item *lip)
{
        struct xfs_cil_ctx *ctx;

        if (list_empty(&lip->li_cil))
                return false;

        ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

        /*
         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
         */
        if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
                return false;
        return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
        struct xlog     *log)
{
        struct xfs_cil  *cil;
        struct xfs_cil_ctx *ctx;

        cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
        if (!cil)
                return -ENOMEM;

        ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
        if (!ctx) {
                kmem_free(cil);
                return -ENOMEM;
        }

        INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);

        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
        cil->xc_current_sequence = ctx->sequence;

        cil->xc_log = log;
        log->l_cilp = cil;
        return 0;
}

void
xlog_cil_destroy(
        struct xlog     *log)
{
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
                        xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
                kmem_free(log->l_cilp->xc_ctx);
        }

        ASSERT(list_empty(&log->l_cilp->xc_cil));
        kmem_free(log->l_cilp);
}