linux/fs/jfs/jfs_txnmgr.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *   Copyright (C) International Business Machines Corp., 2000-2005
   4 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
   5 */
   6
   7/*
   8 *      jfs_txnmgr.c: transaction manager
   9 *
  10 * notes:
  11 * transaction starts with txBegin() and ends with txCommit()
  12 * or txAbort().
  13 *
  14 * tlock is acquired at the time of update;
  15 * (obviate scan at commit time for xtree and dtree)
   16 * tlock and mp point to each other;
  17 * (no hashlist for mp -> tlock).
  18 *
  19 * special cases:
  20 * tlock on in-memory inode:
  21 * in-place tlock in the in-memory inode itself;
  22 * converted to page lock by iWrite() at commit time.
  23 *
  24 * tlock during write()/mmap() under anonymous transaction (tid = 0):
  25 * transferred (?) to transaction at commit time.
  26 *
  27 * use the page itself to update allocation maps
  28 * (obviate intermediate replication of allocation/deallocation data)
  29 * hold on to mp+lock thru update of maps
  30 */
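     /*
      * Minimal usage sketch of this file's external interface (illustrative
      * only; real callers, e.g. the namei/inode paths, also handle inode
      * locking, error handling and the COMMIT_* flags):
      *
      *	tid = txBegin(ip->i_sb, 0);
      *	tlck = txLock(tid, ip, mp, tlckXTREE | tlckENTRY);
      *	... modify the metapage / in-memory inode ...
      *	rc = txCommit(tid, 1, &ip, 0);
      *	txEnd(tid);
      */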
  31
  32#include <linux/fs.h>
  33#include <linux/vmalloc.h>
  34#include <linux/completion.h>
  35#include <linux/freezer.h>
  36#include <linux/module.h>
  37#include <linux/moduleparam.h>
  38#include <linux/kthread.h>
  39#include <linux/seq_file.h>
  40#include "jfs_incore.h"
  41#include "jfs_inode.h"
  42#include "jfs_filsys.h"
  43#include "jfs_metapage.h"
  44#include "jfs_dinode.h"
  45#include "jfs_imap.h"
  46#include "jfs_dmap.h"
  47#include "jfs_superblock.h"
  48#include "jfs_debug.h"
  49
  50/*
  51 *      transaction management structures
  52 */
  53static struct {
  54        int freetid;            /* index of a free tid structure */
  55        int freelock;           /* index first free lock word */
  56        wait_queue_head_t freewait;     /* eventlist of free tblock */
  57        wait_queue_head_t freelockwait; /* eventlist of free tlock */
  58        wait_queue_head_t lowlockwait;  /* eventlist of ample tlocks */
  59        int tlocksInUse;        /* Number of tlocks in use */
  60        spinlock_t LazyLock;    /* synchronize sync_queue & unlock_queue */
  61/*      struct tblock *sync_queue; * Transactions waiting for data sync */
  62        struct list_head unlock_queue;  /* Txns waiting to be released */
  63        struct list_head anon_list;     /* inodes having anonymous txns */
  64        struct list_head anon_list2;    /* inodes having anonymous txns
  65                                           that couldn't be sync'ed */
  66} TxAnchor;
  67
  68int jfs_tlocks_low;             /* Indicates low number of available tlocks */
  69
  70#ifdef CONFIG_JFS_STATISTICS
  71static struct {
  72        uint txBegin;
  73        uint txBegin_barrier;
  74        uint txBegin_lockslow;
  75        uint txBegin_freetid;
  76        uint txBeginAnon;
  77        uint txBeginAnon_barrier;
  78        uint txBeginAnon_lockslow;
  79        uint txLockAlloc;
  80        uint txLockAlloc_freelock;
  81} TxStat;
  82#endif
  83
  84static int nTxBlock = -1;       /* number of transaction blocks */
  85module_param(nTxBlock, int, 0);
  86MODULE_PARM_DESC(nTxBlock,
  87                 "Number of transaction blocks (max:65536)");
  88
  89static int nTxLock = -1;        /* number of transaction locks */
  90module_param(nTxLock, int, 0);
  91MODULE_PARM_DESC(nTxLock,
  92                 "Number of transaction locks (max:65536)");
  93
  94struct tblock *TxBlock; /* transaction block table */
  95static int TxLockLWM;   /* Low water mark for number of txLocks used */
  96static int TxLockHWM;   /* High water mark for number of txLocks used */
  97static int TxLockVHWM;  /* Very High water mark */
  98struct tlock *TxLock;   /* transaction lock table */
  99
 100/*
 101 *      transaction management lock
 102 */
 103static DEFINE_SPINLOCK(jfsTxnLock);
 104
 105#define TXN_LOCK()              spin_lock(&jfsTxnLock)
 106#define TXN_UNLOCK()            spin_unlock(&jfsTxnLock)
 107
 108#define LAZY_LOCK_INIT()        spin_lock_init(&TxAnchor.LazyLock)
 109#define LAZY_LOCK(flags)        spin_lock_irqsave(&TxAnchor.LazyLock, flags)
 110#define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
 111
 112static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
 113static int jfs_commit_thread_waking;
 114
 115/*
  116 * Retry logic exists outside these macros to protect against spurious wakeups.
 117 */
 118static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
 119{
 120        DECLARE_WAITQUEUE(wait, current);
 121
 122        add_wait_queue(event, &wait);
 123        set_current_state(TASK_UNINTERRUPTIBLE);
 124        TXN_UNLOCK();
 125        io_schedule();
 126        remove_wait_queue(event, &wait);
 127}
 128
 129#define TXN_SLEEP(event)\
 130{\
 131        TXN_SLEEP_DROP_LOCK(event);\
 132        TXN_LOCK();\
 133}
 134
 135#define TXN_WAKEUP(event) wake_up_all(event)
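     /*
      * Illustration of the retry idiom noted above: TXN_SLEEP() drops and
      * re-acquires jfsTxnLock, so callers re-test their condition in a loop,
      * as txLockAlloc() does below:
      *
      *	while (!(lid = TxAnchor.freelock))
      *		TXN_SLEEP(&TxAnchor.freelockwait);
      */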
 136
 137/*
 138 *      statistics
 139 */
 140static struct {
 141        tid_t maxtid;           /* 4: biggest tid ever used */
 142        lid_t maxlid;           /* 4: biggest lid ever used */
 143        int ntid;               /* 4: # of transactions performed */
 144        int nlid;               /* 4: # of tlocks acquired */
 145        int waitlock;           /* 4: # of tlock wait */
 146} stattx;
 147
 148/*
 149 * forward references
 150 */
 151static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
 152                struct tlock *tlck, struct commit *cd);
 153static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
 154                struct tlock *tlck);
 155static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 156                struct tlock * tlck);
 157static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 158                struct tlock * tlck);
 159static void txAllocPMap(struct inode *ip, struct maplock * maplock,
 160                struct tblock * tblk);
 161static void txForce(struct tblock * tblk);
 162static void txLog(struct jfs_log *log, struct tblock *tblk,
 163                struct commit *cd);
 164static void txUpdateMap(struct tblock * tblk);
 165static void txRelease(struct tblock * tblk);
 166static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 167           struct tlock * tlck);
 168static void LogSyncRelease(struct metapage * mp);
 169
 170/*
 171 *              transaction block/lock management
 172 *              ---------------------------------
 173 */
 174
 175/*
 176 * Get a transaction lock from the free list.  If the number in use is
 177 * greater than the high water mark, wake up the sync daemon.  This should
 178 * free some anonymous transaction locks.  (TXN_LOCK must be held.)
 179 */
 180static lid_t txLockAlloc(void)
 181{
 182        lid_t lid;
 183
 184        INCREMENT(TxStat.txLockAlloc);
 185        if (!TxAnchor.freelock) {
 186                INCREMENT(TxStat.txLockAlloc_freelock);
 187        }
 188
 189        while (!(lid = TxAnchor.freelock))
 190                TXN_SLEEP(&TxAnchor.freelockwait);
 191        TxAnchor.freelock = TxLock[lid].next;
 192        HIGHWATERMARK(stattx.maxlid, lid);
 193        if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
 194                jfs_info("txLockAlloc tlocks low");
 195                jfs_tlocks_low = 1;
 196                wake_up_process(jfsSyncThread);
 197        }
 198
 199        return lid;
 200}
 201
 202static void txLockFree(lid_t lid)
 203{
 204        TxLock[lid].tid = 0;
 205        TxLock[lid].next = TxAnchor.freelock;
 206        TxAnchor.freelock = lid;
 207        TxAnchor.tlocksInUse--;
 208        if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
 209                jfs_info("txLockFree jfs_tlocks_low no more");
 210                jfs_tlocks_low = 0;
 211                TXN_WAKEUP(&TxAnchor.lowlockwait);
 212        }
 213        TXN_WAKEUP(&TxAnchor.freelockwait);
 214}
 215
 216/*
 217 * NAME:        txInit()
 218 *
 219 * FUNCTION:    initialize transaction management structures
 220 *
 221 * RETURN:
 222 *
 223 * serialization: single thread at jfs_init()
 224 */
 225int txInit(void)
 226{
 227        int k, size;
 228        struct sysinfo si;
 229
 230        /* Set defaults for nTxLock and nTxBlock if unset */
 231
 232        if (nTxLock == -1) {
 233                if (nTxBlock == -1) {
 234                        /* Base default on memory size */
 235                        si_meminfo(&si);
 236                        if (si.totalram > (256 * 1024)) /* 1 GB */
 237                                nTxLock = 64 * 1024;
 238                        else
 239                                nTxLock = si.totalram >> 2;
 240                } else if (nTxBlock > (8 * 1024))
 241                        nTxLock = 64 * 1024;
 242                else
 243                        nTxLock = nTxBlock << 3;
 244        }
 245        if (nTxBlock == -1)
 246                nTxBlock = nTxLock >> 3;
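             /*
              * Worked example (assuming 4 KB pages and neither parameter set):
              * si.totalram == 131072 pages (512 MB) gives
              * nTxLock = 131072 >> 2 = 32768 and nTxBlock = 32768 >> 3 = 4096.
              */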
 247
 248        /* Verify tunable parameters */
 249        if (nTxBlock < 16)
 250                nTxBlock = 16;  /* No one should set it this low */
 251        if (nTxBlock > 65536)
 252                nTxBlock = 65536;
 253        if (nTxLock < 256)
 254                nTxLock = 256;  /* No one should set it this low */
 255        if (nTxLock > 65536)
 256                nTxLock = 65536;
 257
 258        printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
 259               nTxBlock, nTxLock);
 260        /*
 261         * initialize transaction block (tblock) table
 262         *
 263         * transaction id (tid) = tblock index
 264         * tid = 0 is reserved.
 265         */
 266        TxLockLWM = (nTxLock * 4) / 10;
 267        TxLockHWM = (nTxLock * 7) / 10;
 268        TxLockVHWM = (nTxLock * 8) / 10;
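             /*
              * For example, with the maximum nTxLock of 65536 the integer
              * divisions above give LWM = 26214, HWM = 45875, VHWM = 52428
              * (roughly 40%, 70% and 80% of the tlock table).
              */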
 269
 270        size = sizeof(struct tblock) * nTxBlock;
 271        TxBlock = vmalloc(size);
 272        if (TxBlock == NULL)
 273                return -ENOMEM;
 274
 275        for (k = 1; k < nTxBlock - 1; k++) {
 276                TxBlock[k].next = k + 1;
 277                init_waitqueue_head(&TxBlock[k].gcwait);
 278                init_waitqueue_head(&TxBlock[k].waitor);
 279        }
 280        TxBlock[k].next = 0;
 281        init_waitqueue_head(&TxBlock[k].gcwait);
 282        init_waitqueue_head(&TxBlock[k].waitor);
 283
 284        TxAnchor.freetid = 1;
 285        init_waitqueue_head(&TxAnchor.freewait);
 286
 287        stattx.maxtid = 1;      /* statistics */
 288
 289        /*
 290         * initialize transaction lock (tlock) table
 291         *
 292         * transaction lock id = tlock index
 293         * tlock id = 0 is reserved.
 294         */
 295        size = sizeof(struct tlock) * nTxLock;
 296        TxLock = vmalloc(size);
 297        if (TxLock == NULL) {
 298                vfree(TxBlock);
 299                return -ENOMEM;
 300        }
 301
 302        /* initialize tlock table */
 303        for (k = 1; k < nTxLock - 1; k++)
 304                TxLock[k].next = k + 1;
 305        TxLock[k].next = 0;
 306        init_waitqueue_head(&TxAnchor.freelockwait);
 307        init_waitqueue_head(&TxAnchor.lowlockwait);
 308
 309        TxAnchor.freelock = 1;
 310        TxAnchor.tlocksInUse = 0;
 311        INIT_LIST_HEAD(&TxAnchor.anon_list);
 312        INIT_LIST_HEAD(&TxAnchor.anon_list2);
 313
 314        LAZY_LOCK_INIT();
 315        INIT_LIST_HEAD(&TxAnchor.unlock_queue);
 316
 317        stattx.maxlid = 1;      /* statistics */
 318
 319        return 0;
 320}
 321
 322/*
 323 * NAME:        txExit()
 324 *
 325 * FUNCTION:    clean up when module is unloaded
 326 */
 327void txExit(void)
 328{
 329        vfree(TxLock);
 330        TxLock = NULL;
 331        vfree(TxBlock);
 332        TxBlock = NULL;
 333}
 334
 335/*
 336 * NAME:        txBegin()
 337 *
 338 * FUNCTION:    start a transaction.
 339 *
 340 * PARAMETER:   sb      - superblock
 341 *              flag    - force for nested tx;
 342 *
 343 * RETURN:      tid     - transaction id
 344 *
   45 * note: the force flag allows starting a tx for a nested tx
   46 * to prevent deadlock on the logsync barrier;
 347 */
 348tid_t txBegin(struct super_block *sb, int flag)
 349{
 350        tid_t t;
 351        struct tblock *tblk;
 352        struct jfs_log *log;
 353
 354        jfs_info("txBegin: flag = 0x%x", flag);
 355        log = JFS_SBI(sb)->log;
 356
 357        TXN_LOCK();
 358
 359        INCREMENT(TxStat.txBegin);
 360
 361      retry:
 362        if (!(flag & COMMIT_FORCE)) {
 363                /*
 364                 * synchronize with logsync barrier
 365                 */
 366                if (test_bit(log_SYNCBARRIER, &log->flag) ||
 367                    test_bit(log_QUIESCE, &log->flag)) {
 368                        INCREMENT(TxStat.txBegin_barrier);
 369                        TXN_SLEEP(&log->syncwait);
 370                        goto retry;
 371                }
 372        }
 373        if (flag == 0) {
 374                /*
 375                 * Don't begin transaction if we're getting starved for tlocks
 376                 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
 377                 * free tlocks)
 378                 */
 379                if (TxAnchor.tlocksInUse > TxLockVHWM) {
 380                        INCREMENT(TxStat.txBegin_lockslow);
 381                        TXN_SLEEP(&TxAnchor.lowlockwait);
 382                        goto retry;
 383                }
 384        }
 385
 386        /*
 387         * allocate transaction id/block
 388         */
 389        if ((t = TxAnchor.freetid) == 0) {
 390                jfs_info("txBegin: waiting for free tid");
 391                INCREMENT(TxStat.txBegin_freetid);
 392                TXN_SLEEP(&TxAnchor.freewait);
 393                goto retry;
 394        }
 395
 396        tblk = tid_to_tblock(t);
 397
 398        if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
 399                /* Don't let a non-forced transaction take the last tblk */
 400                jfs_info("txBegin: waiting for free tid");
 401                INCREMENT(TxStat.txBegin_freetid);
 402                TXN_SLEEP(&TxAnchor.freewait);
 403                goto retry;
 404        }
 405
 406        TxAnchor.freetid = tblk->next;
 407
 408        /*
 409         * initialize transaction
 410         */
 411
 412        /*
 413         * We can't zero the whole thing or we screw up another thread being
 414         * awakened after sleeping on tblk->waitor
 415         *
 416         * memset(tblk, 0, sizeof(struct tblock));
 417         */
 418        tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
 419
 420        tblk->sb = sb;
 421        ++log->logtid;
 422        tblk->logtid = log->logtid;
 423
 424        ++log->active;
 425
 426        HIGHWATERMARK(stattx.maxtid, t);        /* statistics */
 427        INCREMENT(stattx.ntid); /* statistics */
 428
 429        TXN_UNLOCK();
 430
 431        jfs_info("txBegin: returning tid = %d", t);
 432
 433        return t;
 434}
 435
 436/*
 437 * NAME:        txBeginAnon()
 438 *
 439 * FUNCTION:    start an anonymous transaction.
  440 *              Blocks if a logsync barrier is active or available tlocks
  441 *              are low, to prevent anonymous tlocks from depleting the supply.
 442 *
 443 * PARAMETER:   sb      - superblock
 444 *
 445 * RETURN:      none
 446 */
 447void txBeginAnon(struct super_block *sb)
 448{
 449        struct jfs_log *log;
 450
 451        log = JFS_SBI(sb)->log;
 452
 453        TXN_LOCK();
 454        INCREMENT(TxStat.txBeginAnon);
 455
 456      retry:
 457        /*
 458         * synchronize with logsync barrier
 459         */
 460        if (test_bit(log_SYNCBARRIER, &log->flag) ||
 461            test_bit(log_QUIESCE, &log->flag)) {
 462                INCREMENT(TxStat.txBeginAnon_barrier);
 463                TXN_SLEEP(&log->syncwait);
 464                goto retry;
 465        }
 466
 467        /*
 468         * Don't begin transaction if we're getting starved for tlocks
 469         */
 470        if (TxAnchor.tlocksInUse > TxLockVHWM) {
 471                INCREMENT(TxStat.txBeginAnon_lockslow);
 472                TXN_SLEEP(&TxAnchor.lowlockwait);
 473                goto retry;
 474        }
 475        TXN_UNLOCK();
 476}
 477
 478/*
 479 *      txEnd()
 480 *
 481 * function: free specified transaction block.
 482 *
 483 *      logsync barrier processing:
 484 *
 485 * serialization:
 486 */
 487void txEnd(tid_t tid)
 488{
 489        struct tblock *tblk = tid_to_tblock(tid);
 490        struct jfs_log *log;
 491
 492        jfs_info("txEnd: tid = %d", tid);
 493        TXN_LOCK();
 494
 495        /*
 496         * wakeup transactions waiting on the page locked
 497         * by the current transaction
 498         */
 499        TXN_WAKEUP(&tblk->waitor);
 500
 501        log = JFS_SBI(tblk->sb)->log;
 502
 503        /*
 504         * Lazy commit thread can't free this guy until we mark it UNLOCKED,
 505         * otherwise, we would be left with a transaction that may have been
 506         * reused.
 507         *
 508         * Lazy commit thread will turn off tblkGC_LAZY before calling this
 509         * routine.
 510         */
 511        if (tblk->flag & tblkGC_LAZY) {
 512                jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
 513                TXN_UNLOCK();
 514
 515                spin_lock_irq(&log->gclock);    // LOGGC_LOCK
 516                tblk->flag |= tblkGC_UNLOCKED;
 517                spin_unlock_irq(&log->gclock);  // LOGGC_UNLOCK
 518                return;
 519        }
 520
 521        jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
 522
 523        assert(tblk->next == 0);
 524
 525        /*
 526         * insert tblock back on freelist
 527         */
 528        tblk->next = TxAnchor.freetid;
 529        TxAnchor.freetid = tid;
 530
 531        /*
 532         * mark the tblock not active
 533         */
 534        if (--log->active == 0) {
 535                clear_bit(log_FLUSH, &log->flag);
 536
 537                /*
 538                 * synchronize with logsync barrier
 539                 */
 540                if (test_bit(log_SYNCBARRIER, &log->flag)) {
 541                        TXN_UNLOCK();
 542
 543                        /* write dirty metadata & forward log syncpt */
 544                        jfs_syncpt(log, 1);
 545
 546                        jfs_info("log barrier off: 0x%x", log->lsn);
 547
 548                        /* enable new transactions start */
 549                        clear_bit(log_SYNCBARRIER, &log->flag);
 550
 551                        /* wakeup all waitors for logsync barrier */
 552                        TXN_WAKEUP(&log->syncwait);
 553
 554                        goto wakeup;
 555                }
 556        }
 557
 558        TXN_UNLOCK();
 559wakeup:
 560        /*
 561         * wakeup all waitors for a free tblock
 562         */
 563        TXN_WAKEUP(&TxAnchor.freewait);
 564}
 565
 566/*
 567 *      txLock()
 568 *
 569 * function: acquire a transaction lock on the specified <mp>
 570 *
 571 * parameter:
 572 *
 573 * return:      transaction lock id
 574 *
 575 * serialization:
 576 */
 577struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
 578                     int type)
 579{
 580        struct jfs_inode_info *jfs_ip = JFS_IP(ip);
 581        int dir_xtree = 0;
 582        lid_t lid;
 583        tid_t xtid;
 584        struct tlock *tlck;
 585        struct xtlock *xtlck;
 586        struct linelock *linelock;
 587        xtpage_t *p;
 588        struct tblock *tblk;
 589
 590        TXN_LOCK();
 591
 592        if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
 593            !(mp->xflag & COMMIT_PAGE)) {
 594                /*
 595                 * Directory inode is special.  It can have both an xtree tlock
 596                 * and a dtree tlock associated with it.
 597                 */
 598                dir_xtree = 1;
 599                lid = jfs_ip->xtlid;
 600        } else
 601                lid = mp->lid;
 602
 603        /* is page not locked by a transaction ? */
 604        if (lid == 0)
 605                goto allocateLock;
 606
 607        jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
 608
 609        /* is page locked by the requester transaction ? */
 610        tlck = lid_to_tlock(lid);
 611        if ((xtid = tlck->tid) == tid) {
 612                TXN_UNLOCK();
 613                goto grantLock;
 614        }
 615
 616        /*
 617         * is page locked by anonymous transaction/lock ?
 618         *
 619         * (page update without transaction (i.e., file write) is
 620         * locked under anonymous transaction tid = 0:
 621         * anonymous tlocks maintained on anonymous tlock list of
 622         * the inode of the page and available to all anonymous
 623         * transactions until txCommit() time at which point
 624         * they are transferred to the transaction tlock list of
 625         * the committing transaction of the inode)
 626         */
 627        if (xtid == 0) {
 628                tlck->tid = tid;
 629                TXN_UNLOCK();
 630                tblk = tid_to_tblock(tid);
 631                /*
 632                 * The order of the tlocks in the transaction is important
 633                 * (during truncate, child xtree pages must be freed before
 634                 * parent's tlocks change the working map).
 635                 * Take tlock off anonymous list and add to tail of
 636                 * transaction list
 637                 *
 638                 * Note:  We really need to get rid of the tid & lid and
 639                 * use list_head's.  This code is getting UGLY!
 640                 */
 641                if (jfs_ip->atlhead == lid) {
 642                        if (jfs_ip->atltail == lid) {
 643                                /* only anonymous txn.
 644                                 * Remove from anon_list
 645                                 */
 646                                TXN_LOCK();
 647                                list_del_init(&jfs_ip->anon_inode_list);
 648                                TXN_UNLOCK();
 649                        }
 650                        jfs_ip->atlhead = tlck->next;
 651                } else {
 652                        lid_t last;
 653                        for (last = jfs_ip->atlhead;
 654                             lid_to_tlock(last)->next != lid;
 655                             last = lid_to_tlock(last)->next) {
 656                                assert(last);
 657                        }
 658                        lid_to_tlock(last)->next = tlck->next;
 659                        if (jfs_ip->atltail == lid)
 660                                jfs_ip->atltail = last;
 661                }
 662
 663                /* insert the tlock at tail of transaction tlock list */
 664
 665                if (tblk->next)
 666                        lid_to_tlock(tblk->last)->next = lid;
 667                else
 668                        tblk->next = lid;
 669                tlck->next = 0;
 670                tblk->last = lid;
 671
 672                goto grantLock;
 673        }
 674
 675        goto waitLock;
 676
 677        /*
 678         * allocate a tlock
 679         */
 680      allocateLock:
 681        lid = txLockAlloc();
 682        tlck = lid_to_tlock(lid);
 683
 684        /*
 685         * initialize tlock
 686         */
 687        tlck->tid = tid;
 688
 689        TXN_UNLOCK();
 690
 691        /* mark tlock for meta-data page */
 692        if (mp->xflag & COMMIT_PAGE) {
 693
 694                tlck->flag = tlckPAGELOCK;
 695
 696                /* mark the page dirty and nohomeok */
 697                metapage_nohomeok(mp);
 698
 699                jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
 700                         mp, mp->nohomeok, tid, tlck);
 701
 702                /* if anonymous transaction, and buffer is on the group
 703                 * commit synclist, mark inode to show this.  This will
 704                 * prevent the buffer from being marked nohomeok for too
 705                 * long a time.
 706                 */
 707                if ((tid == 0) && mp->lsn)
 708                        set_cflag(COMMIT_Synclist, ip);
 709        }
 710        /* mark tlock for in-memory inode */
 711        else
 712                tlck->flag = tlckINODELOCK;
 713
 714        if (S_ISDIR(ip->i_mode))
 715                tlck->flag |= tlckDIRECTORY;
 716
 717        tlck->type = 0;
 718
 719        /* bind the tlock and the page */
 720        tlck->ip = ip;
 721        tlck->mp = mp;
 722        if (dir_xtree)
 723                jfs_ip->xtlid = lid;
 724        else
 725                mp->lid = lid;
 726
 727        /*
 728         * enqueue transaction lock to transaction/inode
 729         */
 730        /* insert the tlock at tail of transaction tlock list */
 731        if (tid) {
 732                tblk = tid_to_tblock(tid);
 733                if (tblk->next)
 734                        lid_to_tlock(tblk->last)->next = lid;
 735                else
 736                        tblk->next = lid;
 737                tlck->next = 0;
 738                tblk->last = lid;
 739        }
 740        /* anonymous transaction:
 741         * insert the tlock at head of inode anonymous tlock list
 742         */
 743        else {
 744                tlck->next = jfs_ip->atlhead;
 745                jfs_ip->atlhead = lid;
 746                if (tlck->next == 0) {
 747                        /* This inode's first anonymous transaction */
 748                        jfs_ip->atltail = lid;
 749                        TXN_LOCK();
 750                        list_add_tail(&jfs_ip->anon_inode_list,
 751                                      &TxAnchor.anon_list);
 752                        TXN_UNLOCK();
 753                }
 754        }
 755
 756        /* initialize type dependent area for linelock */
 757        linelock = (struct linelock *) & tlck->lock;
 758        linelock->next = 0;
 759        linelock->flag = tlckLINELOCK;
 760        linelock->maxcnt = TLOCKSHORT;
 761        linelock->index = 0;
 762
 763        switch (type & tlckTYPE) {
 764        case tlckDTREE:
 765                linelock->l2linesize = L2DTSLOTSIZE;
 766                break;
 767
 768        case tlckXTREE:
 769                linelock->l2linesize = L2XTSLOTSIZE;
 770
 771                xtlck = (struct xtlock *) linelock;
 772                xtlck->header.offset = 0;
 773                xtlck->header.length = 2;
 774
 775                if (type & tlckNEW) {
 776                        xtlck->lwm.offset = XTENTRYSTART;
 777                } else {
 778                        if (mp->xflag & COMMIT_PAGE)
 779                                p = (xtpage_t *) mp->data;
 780                        else
 781                                p = &jfs_ip->i_xtroot;
 782                        xtlck->lwm.offset =
 783                            le16_to_cpu(p->header.nextindex);
 784                }
 785                xtlck->lwm.length = 0;  /* ! */
 786                xtlck->twm.offset = 0;
 787                xtlck->hwm.offset = 0;
 788
 789                xtlck->index = 2;
 790                break;
 791
 792        case tlckINODE:
 793                linelock->l2linesize = L2INODESLOTSIZE;
 794                break;
 795
 796        case tlckDATA:
 797                linelock->l2linesize = L2DATASLOTSIZE;
 798                break;
 799
 800        default:
 801                jfs_err("UFO tlock:0x%p", tlck);
 802        }
 803
 804        /*
 805         * update tlock vector
 806         */
 807      grantLock:
 808        tlck->type |= type;
 809
 810        return tlck;
 811
 812        /*
 813         * page is being locked by another transaction:
 814         */
 815      waitLock:
 816        /* Only locks on ipimap or ipaimap should reach here */
 817        /* assert(jfs_ip->fileset == AGGREGATE_I); */
 818        if (jfs_ip->fileset != AGGREGATE_I) {
 819                printk(KERN_ERR "txLock: trying to lock locked page!");
 820                print_hex_dump(KERN_ERR, "ip: ", DUMP_PREFIX_ADDRESS, 16, 4,
 821                               ip, sizeof(*ip), 0);
 822                print_hex_dump(KERN_ERR, "mp: ", DUMP_PREFIX_ADDRESS, 16, 4,
 823                               mp, sizeof(*mp), 0);
 824                print_hex_dump(KERN_ERR, "Locker's tblock: ",
 825                               DUMP_PREFIX_ADDRESS, 16, 4, tid_to_tblock(tid),
 826                               sizeof(struct tblock), 0);
 827                print_hex_dump(KERN_ERR, "Tlock: ", DUMP_PREFIX_ADDRESS, 16, 4,
 828                               tlck, sizeof(*tlck), 0);
 829                BUG();
 830        }
 831        INCREMENT(stattx.waitlock);     /* statistics */
 832        TXN_UNLOCK();
 833        release_metapage(mp);
 834        TXN_LOCK();
 835        xtid = tlck->tid;       /* reacquire after dropping TXN_LOCK */
 836
 837        jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
 838                 tid, xtid, lid);
 839
 840        /* Recheck everything since dropping TXN_LOCK */
 841        if (xtid && (tlck->mp == mp) && (mp->lid == lid))
 842                TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
 843        else
 844                TXN_UNLOCK();
 845        jfs_info("txLock: awakened     tid = %d, lid = %d", tid, lid);
 846
 847        return NULL;
 848}
 849
 850/*
 851 * NAME:        txRelease()
 852 *
 853 * FUNCTION:    Release buffers associated with transaction locks, but don't
  854 *              mark homeok yet.  This allows other transactions to modify
  855 *              buffers, but won't let them go to disk until the commit record
 856 *              actually gets written.
 857 *
 858 * PARAMETER:
 859 *              tblk    -
 860 *
 861 * RETURN:      Errors from subroutines.
 862 */
 863static void txRelease(struct tblock * tblk)
 864{
 865        struct metapage *mp;
 866        lid_t lid;
 867        struct tlock *tlck;
 868
 869        TXN_LOCK();
 870
 871        for (lid = tblk->next; lid; lid = tlck->next) {
 872                tlck = lid_to_tlock(lid);
 873                if ((mp = tlck->mp) != NULL &&
 874                    (tlck->type & tlckBTROOT) == 0) {
 875                        assert(mp->xflag & COMMIT_PAGE);
 876                        mp->lid = 0;
 877                }
 878        }
 879
 880        /*
 881         * wakeup transactions waiting on a page locked
 882         * by the current transaction
 883         */
 884        TXN_WAKEUP(&tblk->waitor);
 885
 886        TXN_UNLOCK();
 887}
 888
 889/*
 890 * NAME:        txUnlock()
 891 *
 892 * FUNCTION:    Initiates pageout of pages modified by tid in journalled
 893 *              objects and frees their lockwords.
 894 */
 895static void txUnlock(struct tblock * tblk)
 896{
 897        struct tlock *tlck;
 898        struct linelock *linelock;
 899        lid_t lid, next, llid, k;
 900        struct metapage *mp;
 901        struct jfs_log *log;
 902        int difft, diffp;
 903        unsigned long flags;
 904
 905        jfs_info("txUnlock: tblk = 0x%p", tblk);
 906        log = JFS_SBI(tblk->sb)->log;
 907
 908        /*
 909         * mark page under tlock homeok (its log has been written):
 910         */
 911        for (lid = tblk->next; lid; lid = next) {
 912                tlck = lid_to_tlock(lid);
 913                next = tlck->next;
 914
 915                jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
 916
 917                /* unbind page from tlock */
 918                if ((mp = tlck->mp) != NULL &&
 919                    (tlck->type & tlckBTROOT) == 0) {
 920                        assert(mp->xflag & COMMIT_PAGE);
 921
 922                        /* hold buffer
 923                         */
 924                        hold_metapage(mp);
 925
 926                        assert(mp->nohomeok > 0);
 927                        _metapage_homeok(mp);
 928
 929                        /* inherit younger/larger clsn */
 930                        LOGSYNC_LOCK(log, flags);
 931                        if (mp->clsn) {
 932                                logdiff(difft, tblk->clsn, log);
 933                                logdiff(diffp, mp->clsn, log);
 934                                if (difft > diffp)
 935                                        mp->clsn = tblk->clsn;
 936                        } else
 937                                mp->clsn = tblk->clsn;
 938                        LOGSYNC_UNLOCK(log, flags);
 939
 940                        assert(!(tlck->flag & tlckFREEPAGE));
 941
 942                        put_metapage(mp);
 943                }
 944
 945                /* insert tlock, and linelock(s) of the tlock if any,
 946                 * at head of freelist
 947                 */
 948                TXN_LOCK();
 949
 950                llid = ((struct linelock *) & tlck->lock)->next;
 951                while (llid) {
 952                        linelock = (struct linelock *) lid_to_tlock(llid);
 953                        k = linelock->next;
 954                        txLockFree(llid);
 955                        llid = k;
 956                }
 957                txLockFree(lid);
 958
 959                TXN_UNLOCK();
 960        }
 961        tblk->next = tblk->last = 0;
 962
 963        /*
 964         * remove tblock from logsynclist
  965         * (allocation map pages inherited the lsn of tblk and
  966         * have been inserted in the logsync list at txUpdateMap())
 967         */
 968        if (tblk->lsn) {
 969                LOGSYNC_LOCK(log, flags);
 970                log->count--;
 971                list_del(&tblk->synclist);
 972                LOGSYNC_UNLOCK(log, flags);
 973        }
 974}
 975
 976/*
 977 *      txMaplock()
 978 *
 979 * function: allocate a transaction lock for freed page/entry;
 980 *      for freed page, maplock is used as xtlock/dtlock type;
 981 */
 982struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
 983{
 984        struct jfs_inode_info *jfs_ip = JFS_IP(ip);
 985        lid_t lid;
 986        struct tblock *tblk;
 987        struct tlock *tlck;
 988        struct maplock *maplock;
 989
 990        TXN_LOCK();
 991
 992        /*
 993         * allocate a tlock
 994         */
 995        lid = txLockAlloc();
 996        tlck = lid_to_tlock(lid);
 997
 998        /*
 999         * initialize tlock
1000         */
1001        tlck->tid = tid;
1002
1003        /* bind the tlock and the object */
1004        tlck->flag = tlckINODELOCK;
1005        if (S_ISDIR(ip->i_mode))
1006                tlck->flag |= tlckDIRECTORY;
1007        tlck->ip = ip;
1008        tlck->mp = NULL;
1009
1010        tlck->type = type;
1011
1012        /*
1013         * enqueue transaction lock to transaction/inode
1014         */
1015        /* insert the tlock at tail of transaction tlock list */
1016        if (tid) {
1017                tblk = tid_to_tblock(tid);
1018                if (tblk->next)
1019                        lid_to_tlock(tblk->last)->next = lid;
1020                else
1021                        tblk->next = lid;
1022                tlck->next = 0;
1023                tblk->last = lid;
1024        }
1025        /* anonymous transaction:
1026         * insert the tlock at head of inode anonymous tlock list
1027         */
1028        else {
1029                tlck->next = jfs_ip->atlhead;
1030                jfs_ip->atlhead = lid;
1031                if (tlck->next == 0) {
1032                        /* This inode's first anonymous transaction */
1033                        jfs_ip->atltail = lid;
1034                        list_add_tail(&jfs_ip->anon_inode_list,
1035                                      &TxAnchor.anon_list);
1036                }
1037        }
1038
1039        TXN_UNLOCK();
1040
1041        /* initialize type dependent area for maplock */
1042        maplock = (struct maplock *) & tlck->lock;
1043        maplock->next = 0;
1044        maplock->maxcnt = 0;
1045        maplock->index = 0;
1046
1047        return tlck;
1048}
1049
1050/*
1051 *      txLinelock()
1052 *
1053 * function: allocate a transaction lock for log vector list
1054 */
1055struct linelock *txLinelock(struct linelock * tlock)
1056{
1057        lid_t lid;
1058        struct tlock *tlck;
1059        struct linelock *linelock;
1060
1061        TXN_LOCK();
1062
1063        /* allocate a TxLock structure */
1064        lid = txLockAlloc();
1065        tlck = lid_to_tlock(lid);
1066
1067        TXN_UNLOCK();
1068
1069        /* initialize linelock */
1070        linelock = (struct linelock *) tlck;
1071        linelock->next = 0;
1072        linelock->flag = tlckLINELOCK;
1073        linelock->maxcnt = TLOCKLONG;
1074        linelock->index = 0;
1075        if (tlck->flag & tlckDIRECTORY)
1076                linelock->flag |= tlckDIRECTORY;
1077
1078        /* append linelock after tlock */
1079        linelock->next = tlock->next;
1080        tlock->next = lid;
1081
1082        return linelock;
1083}
1084
1085/*
1086 *              transaction commit management
1087 *              -----------------------------
1088 */
1089
1090/*
1091 * NAME:        txCommit()
1092 *
1093 * FUNCTION:    commit the changes to the objects specified in
1094 *              clist.  For journalled segments only the
 1095 *              changes of the caller are committed, i.e. by tid.
1096 *              for non-journalled segments the data are flushed to
1097 *              disk and then the change to the disk inode and indirect
1098 *              blocks committed (so blocks newly allocated to the
1099 *              segment will be made a part of the segment atomically).
1100 *
1101 *              all of the segments specified in clist must be in
1102 *              one file system. no more than 6 segments are needed
1103 *              to handle all unix svcs.
1104 *
1105 *              if the i_nlink field (i.e. disk inode link count)
1106 *              is zero, and the type of inode is a regular file or
 1107 *              directory, or symbolic link, the inode is truncated
1108 *              to zero length. the truncation is committed but the
1109 *              VM resources are unaffected until it is closed (see
1110 *              iput and iclose).
1111 *
1112 * PARAMETER:
1113 *
1114 * RETURN:
1115 *
1116 * serialization:
1117 *              on entry the inode lock on each segment is assumed
1118 *              to be held.
1119 *
1120 * i/o error:
1121 */
1122int txCommit(tid_t tid,         /* transaction identifier */
1123             int nip,           /* number of inodes to commit */
1124             struct inode **iplist,     /* list of inode to commit */
1125             int flag)
1126{
1127        int rc = 0;
1128        struct commit cd;
1129        struct jfs_log *log;
1130        struct tblock *tblk;
1131        struct lrd *lrd;
1132        struct inode *ip;
1133        struct jfs_inode_info *jfs_ip;
1134        int k, n;
1135        ino_t top;
1136        struct super_block *sb;
1137
1138        jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
1139        /* is read-only file system ? */
1140        if (isReadOnly(iplist[0])) {
1141                rc = -EROFS;
1142                goto TheEnd;
1143        }
1144
1145        sb = cd.sb = iplist[0]->i_sb;
1146        cd.tid = tid;
1147
1148        if (tid == 0)
1149                tid = txBegin(sb, 0);
1150        tblk = tid_to_tblock(tid);
1151
1152        /*
1153         * initialize commit structure
1154         */
1155        log = JFS_SBI(sb)->log;
1156        cd.log = log;
1157
1158        /* initialize log record descriptor in commit */
1159        lrd = &cd.lrd;
1160        lrd->logtid = cpu_to_le32(tblk->logtid);
1161        lrd->backchain = 0;
1162
1163        tblk->xflag |= flag;
1164
1165        if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
1166                tblk->xflag |= COMMIT_LAZY;
1167        /*
1168         *      prepare non-journaled objects for commit
1169         *
 1170         * flush data pages of a non-journaled file
 1171         * to prevent the file from getting uninitialized disk blocks
 1172         * in case of a crash.
1173         * (new blocks - )
1174         */
1175        cd.iplist = iplist;
1176        cd.nip = nip;
1177
1178        /*
1179         *      acquire transaction lock on (on-disk) inodes
1180         *
1181         * update on-disk inode from in-memory inode
1182         * acquiring transaction locks for AFTER records
1183         * on the on-disk inode of file object
1184         *
1185         * sort the inodes array by inode number in descending order
1186         * to prevent deadlock when acquiring transaction lock
1187         * of on-disk inodes on multiple on-disk inode pages by
1188         * multiple concurrent transactions
1189         */
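             /*
              * (A simple selection sort into descending order: e.g. i_ino
              * values 7, 42, 19 end up processed as 42, 19, 7.)
              */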
1190        for (k = 0; k < cd.nip; k++) {
1191                top = (cd.iplist[k])->i_ino;
1192                for (n = k + 1; n < cd.nip; n++) {
1193                        ip = cd.iplist[n];
1194                        if (ip->i_ino > top) {
1195                                top = ip->i_ino;
1196                                cd.iplist[n] = cd.iplist[k];
1197                                cd.iplist[k] = ip;
1198                        }
1199                }
1200
1201                ip = cd.iplist[k];
1202                jfs_ip = JFS_IP(ip);
1203
1204                /*
1205                 * BUGBUG - This code has temporarily been removed.  The
1206                 * intent is to ensure that any file data is written before
1207                 * the metadata is committed to the journal.  This prevents
1208                 * uninitialized data from appearing in a file after the
1209                 * journal has been replayed.  (The uninitialized data
1210                 * could be sensitive data removed by another user.)
1211                 *
1212                 * The problem now is that we are holding the IWRITELOCK
1213                 * on the inode, and calling filemap_fdatawrite on an
1214                 * unmapped page will cause a deadlock in jfs_get_block.
1215                 *
1216                 * The long term solution is to pare down the use of
1217                 * IWRITELOCK.  We are currently holding it too long.
1218                 * We could also be smarter about which data pages need
1219                 * to be written before the transaction is committed and
1220                 * when we don't need to worry about it at all.
1221                 *
1222                 * if ((!S_ISDIR(ip->i_mode))
1223                 *    && (tblk->flag & COMMIT_DELETE) == 0)
1224                 *      filemap_write_and_wait(ip->i_mapping);
1225                 */
1226
1227                /*
1228                 * Mark inode as not dirty.  It will still be on the dirty
1229                 * inode list, but we'll know not to commit it again unless
1230                 * it gets marked dirty again
1231                 */
1232                clear_cflag(COMMIT_Dirty, ip);
1233
1234                /* inherit anonymous tlock(s) of inode */
1235                if (jfs_ip->atlhead) {
1236                        lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
1237                        tblk->next = jfs_ip->atlhead;
1238                        if (!tblk->last)
1239                                tblk->last = jfs_ip->atltail;
1240                        jfs_ip->atlhead = jfs_ip->atltail = 0;
1241                        TXN_LOCK();
1242                        list_del_init(&jfs_ip->anon_inode_list);
1243                        TXN_UNLOCK();
1244                }
1245
1246                /*
1247                 * acquire transaction lock on on-disk inode page
1248                 * (become first tlock of the tblk's tlock list)
1249                 */
1250                if (((rc = diWrite(tid, ip))))
1251                        goto out;
1252        }
1253
1254        /*
1255         *      write log records from transaction locks
1256         *
1257         * txUpdateMap() resets XAD_NEW in XAD.
1258         */
1259        txLog(log, tblk, &cd);
1260
1261        /*
1262         * Ensure that inode isn't reused before
1263         * lazy commit thread finishes processing
1264         */
1265        if (tblk->xflag & COMMIT_DELETE) {
1266                ihold(tblk->u.ip);
1267                /*
1268                 * Avoid a rare deadlock
1269                 *
1270                 * If the inode is locked, we may be blocked in
1271                 * jfs_commit_inode.  If so, we don't want the
1272                 * lazy_commit thread doing the last iput() on the inode
1273                 * since that may block on the locked inode.  Instead,
1274                 * commit the transaction synchronously, so the last iput
1275                 * will be done by the calling thread (or later)
1276                 */
1277                /*
1278                 * I believe this code is no longer needed.  Splitting I_LOCK
1279                 * into two bits, I_NEW and I_SYNC should prevent this
1280                 * deadlock as well.  But since I don't have a JFS testload
1281                 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
1282                 * Joern
1283                 */
1284                if (tblk->u.ip->i_state & I_SYNC)
1285                        tblk->xflag &= ~COMMIT_LAZY;
1286        }
1287
1288        ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
1289               ((tblk->u.ip->i_nlink == 0) &&
1290                !test_cflag(COMMIT_Nolink, tblk->u.ip)));
1291
1292        /*
1293         *      write COMMIT log record
1294         */
1295        lrd->type = cpu_to_le16(LOG_COMMIT);
1296        lrd->length = 0;
1297        lmLog(log, tblk, lrd, NULL);
1298
1299        lmGroupCommit(log, tblk);
1300
1301        /*
1302         *      - transaction is now committed -
1303         */
1304
1305        /*
1306         * force pages in careful update
1307         * (imap addressing structure update)
1308         */
1309        if (flag & COMMIT_FORCE)
1310                txForce(tblk);
1311
1312        /*
1313         *      update allocation map.
1314         *
1315         * update inode allocation map and inode:
1316         * free pager lock on memory object of inode if any.
1317         * update block allocation map.
1318         *
1319         * txUpdateMap() resets XAD_NEW in XAD.
1320         */
1321        if (tblk->xflag & COMMIT_FORCE)
1322                txUpdateMap(tblk);
1323
1324        /*
1325         *      free transaction locks and pageout/free pages
1326         */
1327        txRelease(tblk);
1328
1329        if ((tblk->flag & tblkGC_LAZY) == 0)
1330                txUnlock(tblk);
1331
1332
1333        /*
1334         *      reset in-memory object state
1335         */
1336        for (k = 0; k < cd.nip; k++) {
1337                ip = cd.iplist[k];
1338                jfs_ip = JFS_IP(ip);
1339
1340                /*
1341                 * reset in-memory inode state
1342                 */
1343                jfs_ip->bxflag = 0;
1344                jfs_ip->blid = 0;
1345        }
1346
1347      out:
1348        if (rc != 0)
1349                txAbort(tid, 1);
1350
1351      TheEnd:
1352        jfs_info("txCommit: tid = %d, returning %d", tid, rc);
1353        return rc;
1354}
1355
1356/*
1357 * NAME:        txLog()
1358 *
1359 * FUNCTION:    Writes AFTER log records for all lines modified
1360 *              by tid for segments specified by inodes in comdata.
1361 *              Code assumes only WRITELOCKS are recorded in lockwords.
1362 *
1363 * PARAMETERS:
1364 *
1365 * RETURN :
1366 */
1367static void txLog(struct jfs_log *log, struct tblock *tblk, struct commit *cd)
1368{
1369        struct inode *ip;
1370        lid_t lid;
1371        struct tlock *tlck;
1372        struct lrd *lrd = &cd->lrd;
1373
1374        /*
1375         * write log record(s) for each tlock of transaction,
1376         */
1377        for (lid = tblk->next; lid; lid = tlck->next) {
1378                tlck = lid_to_tlock(lid);
1379
1380                tlck->flag |= tlckLOG;
1381
1382                /* initialize lrd common */
1383                ip = tlck->ip;
1384                lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
1385                lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
1386                lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
1387
1388                /* write log record of page from the tlock */
1389                switch (tlck->type & tlckTYPE) {
1390                case tlckXTREE:
1391                        xtLog(log, tblk, lrd, tlck);
1392                        break;
1393
1394                case tlckDTREE:
1395                        dtLog(log, tblk, lrd, tlck);
1396                        break;
1397
1398                case tlckINODE:
1399                        diLog(log, tblk, lrd, tlck, cd);
1400                        break;
1401
1402                case tlckMAP:
1403                        mapLog(log, tblk, lrd, tlck);
1404                        break;
1405
1406                case tlckDATA:
1407                        dataLog(log, tblk, lrd, tlck);
1408                        break;
1409
1410                default:
1411                        jfs_err("UFO tlock:0x%p", tlck);
1412                }
1413        }
1414
1415        return;
1416}
1417
1418/*
1419 *      diLog()
1420 *
1421 * function:    log inode tlock and format maplock to update bmap;
1422 */
1423static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
1424                 struct tlock *tlck, struct commit *cd)
1425{
1426        struct metapage *mp;
1427        pxd_t *pxd;
1428        struct pxd_lock *pxdlock;
1429
1430        mp = tlck->mp;
1431
1432        /* initialize as REDOPAGE record format */
1433        lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
1434        lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
1435
1436        pxd = &lrd->log.redopage.pxd;
1437
1438        /*
1439         *      inode after image
1440         */
1441        if (tlck->type & tlckENTRY) {
1442                /* log after-image for logredo(): */
1443                lrd->type = cpu_to_le16(LOG_REDOPAGE);
1444                PXDaddress(pxd, mp->index);
1445                PXDlength(pxd,
1446                          mp->logical_size >> tblk->sb->s_blocksize_bits);
1447                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1448
1449                /* mark page as homeward bound */
1450                tlck->flag |= tlckWRITEPAGE;
1451        } else if (tlck->type & tlckFREE) {
1452                /*
1453                 *      free inode extent
1454                 *
1455                 * (pages of the freed inode extent have been invalidated and
1456                 * a maplock for free of the extent has been formatted at
1457                 * txLock() time);
1458                 *
1459                 * the tlock had been acquired on the inode allocation map page
1460                 * (iag) that specifies the freed extent, even though the map
1461                 * page is not itself logged, to prevent pageout of the map
1462                 * page before the log;
1463                 */
1464
1465                /* log LOG_NOREDOINOEXT of the freed inode extent for
1466                 * logredo() to start NoRedoPage filters, and to update
1467                 * imap and bmap for free of the extent;
1468                 */
1469                lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
1470                /*
1471                 * For the LOG_NOREDOINOEXT record, we need
1472                 * to pass the IAG number and inode extent
1473                 * index (within that IAG) from which the
1474                 * extent is being released.  These have been
1475                 * passed to us in the iplist[1] and iplist[2].
1476                 */
1477                lrd->log.noredoinoext.iagnum =
1478                    cpu_to_le32((u32) (size_t) cd->iplist[1]);
1479                lrd->log.noredoinoext.inoext_idx =
1480                    cpu_to_le32((u32) (size_t) cd->iplist[2]);
1481
1482                pxdlock = (struct pxd_lock *) & tlck->lock;
1483                *pxd = pxdlock->pxd;
1484                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1485
1486                /* update bmap */
1487                tlck->flag |= tlckUPDATEMAP;
1488
1489                /* mark page as homeward bound */
1490                tlck->flag |= tlckWRITEPAGE;
1491        } else
1492                jfs_err("diLog: UFO type tlck:0x%p", tlck);
1493#ifdef  _JFS_WIP
1494        /*
1495         *      alloc/free external EA extent
1496         *
1497         * a maplock for txUpdateMap() to update bPWMAP for alloc/free
1498         * of the extent has been formatted at txLock() time;
1499         */
1500        else {
1501                assert(tlck->type & tlckEA);
1502
1503                /* log LOG_UPDATEMAP for logredo() to update bmap for
1504                 * alloc of new (and free of old) external EA extent;
1505                 */
1506                lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1507                pxdlock = (struct pxd_lock *) & tlck->lock;
1508                nlock = pxdlock->index;
1509                for (i = 0; i < nlock; i++, pxdlock++) {
1510                        if (pxdlock->flag & mlckALLOCPXD)
1511                                lrd->log.updatemap.type =
1512                                    cpu_to_le16(LOG_ALLOCPXD);
1513                        else
1514                                lrd->log.updatemap.type =
1515                                    cpu_to_le16(LOG_FREEPXD);
1516                        lrd->log.updatemap.nxd = cpu_to_le16(1);
1517                        lrd->log.updatemap.pxd = pxdlock->pxd;
1518                        lrd->backchain =
1519                            cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1520                }
1521
1522                /* update bmap */
1523                tlck->flag |= tlckUPDATEMAP;
1524        }
1525#endif                          /* _JFS_WIP */
1526
1527        return;
1528}
1529
1530/*
1531 *      dataLog()
1532 *
1533 * function:    log data tlock
1534 */
1535static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
1536            struct tlock *tlck)
1537{
1538        struct metapage *mp;
1539        pxd_t *pxd;
1540
1541        mp = tlck->mp;
1542
1543        /* initialize as REDOPAGE record format */
1544        lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
1545        lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
1546
1547        pxd = &lrd->log.redopage.pxd;
1548
1549        /* log after-image for logredo(): */
1550        lrd->type = cpu_to_le16(LOG_REDOPAGE);
1551
1552        if (jfs_dirtable_inline(tlck->ip)) {
1553                /*
 1554                 * The table has been truncated; we must have deleted
 1555                 * the last entry, so don't bother logging this.
1556                 */
1557                mp->lid = 0;
1558                grab_metapage(mp);
1559                metapage_homeok(mp);
1560                discard_metapage(mp);
1561                tlck->mp = NULL;
1562                return;
1563        }
1564
1565        PXDaddress(pxd, mp->index);
1566        PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
1567
1568        lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1569
1570        /* mark page as homeward bound */
1571        tlck->flag |= tlckWRITEPAGE;
1572
1573        return;
1574}
1575
1576/*
1577 *      dtLog()
1578 *
1579 * function:    log dtree tlock and format maplock to update bmap;
1580 */
1581static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1582           struct tlock * tlck)
1583{
1584        struct metapage *mp;
1585        struct pxd_lock *pxdlock;
1586        pxd_t *pxd;
1587
1588        mp = tlck->mp;
1589
1590        /* initialize as REDOPAGE/NOREDOPAGE record format */
1591        lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
1592        lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
1593
1594        pxd = &lrd->log.redopage.pxd;
1595
1596        if (tlck->type & tlckBTROOT)
1597                lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1598
1599        /*
1600         *      page extension via relocation: entry insertion;
1601         *      page extension in-place: entry insertion;
1602         *      new right page from page split, reinitialized in-line
1603         *      root from root page split: entry insertion;
1604         */
1605        if (tlck->type & (tlckNEW | tlckEXTEND)) {
1606                /* log after-image of the new page for logredo():
1607                 * mark log (LOG_NEW) for logredo() to initialize
1608                 * freelist and update bmap for alloc of the new page;
1609                 */
1610                lrd->type = cpu_to_le16(LOG_REDOPAGE);
1611                if (tlck->type & tlckEXTEND)
1612                        lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
1613                else
1614                        lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
1615                PXDaddress(pxd, mp->index);
1616                PXDlength(pxd,
1617                          mp->logical_size >> tblk->sb->s_blocksize_bits);
1618                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1619
1620                /* format a maplock for txUpdateMap() to update bPMAP for
1621                 * alloc of the new page;
1622                 */
1623                if (tlck->type & tlckBTROOT)
1624                        return;
1625                tlck->flag |= tlckUPDATEMAP;
1626                pxdlock = (struct pxd_lock *) & tlck->lock;
1627                pxdlock->flag = mlckALLOCPXD;
1628                pxdlock->pxd = *pxd;
1629
1630                pxdlock->index = 1;
1631
1632                /* mark page as homeward bound */
1633                tlck->flag |= tlckWRITEPAGE;
1634                return;
1635        }
1636
1637        /*
1638         *      entry insertion/deletion,
1639         *      sibling page link update (old right page before split);
1640         */
1641        if (tlck->type & (tlckENTRY | tlckRELINK)) {
1642                /* log after-image for logredo(): */
1643                lrd->type = cpu_to_le16(LOG_REDOPAGE);
1644                PXDaddress(pxd, mp->index);
1645                PXDlength(pxd,
1646                          mp->logical_size >> tblk->sb->s_blocksize_bits);
1647                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1648
1649                /* mark page as homeward bound */
1650                tlck->flag |= tlckWRITEPAGE;
1651                return;
1652        }
1653
1654        /*
1655         *      page deletion: page has been invalidated
1656         *      page relocation: source extent
1657         *
1658         *      a maplock for free of the page has been formatted
1659         *      at txLock() time;
1660         */
1661        if (tlck->type & (tlckFREE | tlckRELOCATE)) {
1662                /* log LOG_NOREDOPAGE of the deleted page for logredo()
1663                 * to start NoRedoPage filter and to update bmap for free
1664         * of the deleted page
1665                 */
1666                lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1667                pxdlock = (struct pxd_lock *) & tlck->lock;
1668                *pxd = pxdlock->pxd;
1669                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1670
1671                /* a maplock for txUpdateMap() for free of the page
1672                 * has been formatted at txLock() time;
1673                 */
1674                tlck->flag |= tlckUPDATEMAP;
1675        }
1676        return;
1677}
1678
1679/*
1680 *      xtLog()
1681 *
1682 * function:    log xtree tlock and format maplock to update bmap;
1683 */
1684static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1685           struct tlock * tlck)
1686{
1687        struct inode *ip;
1688        struct metapage *mp;
1689        xtpage_t *p;
1690        struct xtlock *xtlck;
1691        struct maplock *maplock;
1692        struct xdlistlock *xadlock;
1693        struct pxd_lock *pxdlock;
1694        pxd_t *page_pxd;
1695        int next, lwm, hwm;
1696
1697        ip = tlck->ip;
1698        mp = tlck->mp;
1699
1700        /* initialize as REDOPAGE/NOREDOPAGE record format */
1701        lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
1702        lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
1703
1704        page_pxd = &lrd->log.redopage.pxd;
1705
1706        if (tlck->type & tlckBTROOT) {
1707                lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1708                p = &JFS_IP(ip)->i_xtroot;
1709                if (S_ISDIR(ip->i_mode))
1710                        lrd->log.redopage.type |=
1711                            cpu_to_le16(LOG_DIR_XTREE);
1712        } else
1713                p = (xtpage_t *) mp->data;
1714        next = le16_to_cpu(p->header.nextindex);
1715
1716        xtlck = (struct xtlock *) & tlck->lock;
1717
1718        maplock = (struct maplock *) & tlck->lock;
1719        xadlock = (struct xdlistlock *) maplock;
1720
1721        /*
1722         *      entry insertion/extension;
1723         *      sibling page link update (old right page before split);
1724         */
1725        if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
1726                /* log after-image for logredo():
1727                 * logredo() will update bmap for alloc of new/extended
1728                 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
1729                 * after-image of XADlist;
1730                 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
1731                 * applying the after-image to the meta-data page.
1732                 */
1733                lrd->type = cpu_to_le16(LOG_REDOPAGE);
1734                PXDaddress(page_pxd, mp->index);
1735                PXDlength(page_pxd,
1736                          mp->logical_size >> tblk->sb->s_blocksize_bits);
1737                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1738
1739                /* format a maplock for txUpdateMap() to update bPMAP
1740                 * for alloc of new/extended extents of XAD[lwm:next)
1741                 * from the page itself;
1742                 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
1743                 */
1744                lwm = xtlck->lwm.offset;
1745                if (lwm == 0)
1746                        lwm = XTPAGEMAXSLOT;
1747
1748                if (lwm == next)
1749                        goto out;
1750                if (lwm > next) {
1751                        jfs_err("xtLog: lwm > next");
1752                        goto out;
1753                }
1754                tlck->flag |= tlckUPDATEMAP;
1755                xadlock->flag = mlckALLOCXADLIST;
1756                xadlock->count = next - lwm;
1757                if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1758                        int i;
1759                        pxd_t *pxd;
1760                        /*
1761                         * Lazy commit may allow xtree to be modified before
1762                         * txUpdateMap runs.  Copy xad into linelock to
1763                         * preserve correct data.
1764                         *
1765                         * We can fit twice as many pxd's as xads in the lock
1766                         */
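                        /* (a pxd_t is half the size of an xad_t, which is
                         * why up to twice as many pxd entries fit in the
                         * same linelock space)
                         */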
1767                        xadlock->flag = mlckALLOCPXDLIST;
1768                        pxd = xadlock->xdlist = &xtlck->pxdlock;
1769                        for (i = 0; i < xadlock->count; i++) {
1770                                PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
1771                                PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
1772                                p->xad[lwm + i].flag &=
1773                                    ~(XAD_NEW | XAD_EXTENDED);
1774                                pxd++;
1775                        }
1776                } else {
1777                        /*
1778                         * xdlist will point into the inode's xtree; ensure
1779                         * that the transaction is not committed lazily.
1780                         */
1781                        xadlock->flag = mlckALLOCXADLIST;
1782                        xadlock->xdlist = &p->xad[lwm];
1783                        tblk->xflag &= ~COMMIT_LAZY;
1784                }
1785                jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d",
1786                         tlck->ip, mp, tlck, lwm, xadlock->count);
1787
1788                maplock->index = 1;
1789
1790              out:
1791                /* mark page as homeward bound */
1792                tlck->flag |= tlckWRITEPAGE;
1793
1794                return;
1795        }
1796
1797        /*
1798         *      page deletion: file deletion/truncation (ref. xtTruncate())
1799         *
1800         * (page will be invalidated after log is written and bmap
1801         * is updated from the page);
1802         */
1803        if (tlck->type & tlckFREE) {
1804                /* LOG_NOREDOPAGE log for NoRedoPage filter:
1805                 * if page free from file delete, NoRedoFile filter from
1806                 * inode image of zero link count will subsume NoRedoPage
1807                 * filters for each page;
1808                 * if page free from file truncation, write NoRedoPage
1809                 * filter;
1810                 *
1811                 * update of block allocation map for the page itself:
1812                 * if page free from deletion and truncation, LOG_UPDATEMAP
1813                 * log for the page itself is generated from processing
1814                 * its parent page xad entries;
1815                 */
1816                /* if page free from file truncation, log LOG_NOREDOPAGE
1817                 * of the deleted page for logredo() to start NoRedoPage
1818                 * filter for the page;
1819                 */
1820                if (tblk->xflag & COMMIT_TRUNCATE) {
1821                        /* write NOREDOPAGE for the page */
1822                        lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1823                        PXDaddress(page_pxd, mp->index);
1824                        PXDlength(page_pxd,
1825                                  mp->logical_size >> tblk->sb->
1826                                  s_blocksize_bits);
1827                        lrd->backchain =
1828                            cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1829
1830                        if (tlck->type & tlckBTROOT) {
1831                                /* Empty xtree must be logged */
1832                                lrd->type = cpu_to_le16(LOG_REDOPAGE);
1833                                lrd->backchain =
1834                                    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1835                        }
1836                }
1837
1838                /* init LOG_UPDATEMAP of the freed extents
1839                 * XAD[XTENTRYSTART:hwm) from the deleted page itself
1840                 * for logredo() to update bmap;
1841                 */
1842                lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1843                lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
1844                xtlck = (struct xtlock *) & tlck->lock;
1845                hwm = xtlck->hwm.offset;
1846                lrd->log.updatemap.nxd =
1847                    cpu_to_le16(hwm - XTENTRYSTART + 1);
1848                /* reformat linelock for lmLog() */
1849                xtlck->header.offset = XTENTRYSTART;
1850                xtlck->header.length = hwm - XTENTRYSTART + 1;
1851                xtlck->index = 1;
1852                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1853
1854                /* format a maplock for txUpdateMap() to update bmap
1855                 * to free extents of XAD[XTENTRYSTART:hwm) from the
1856                 * deleted page itself;
1857                 */
1858                tlck->flag |= tlckUPDATEMAP;
1859                xadlock->count = hwm - XTENTRYSTART + 1;
1860                if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1861                        int i;
1862                        pxd_t *pxd;
1863                        /*
1864                         * Lazy commit may allow xtree to be modified before
1865                         * txUpdateMap runs.  Copy xad into linelock to
1866                         * preserve correct data.
1867                         *
1868                         * We can fit twice as many pxd's as xads in the lock
1869                         */
1870                        xadlock->flag = mlckFREEPXDLIST;
1871                        pxd = xadlock->xdlist = &xtlck->pxdlock;
1872                        for (i = 0; i < xadlock->count; i++) {
1873                                PXDaddress(pxd,
1874                                        addressXAD(&p->xad[XTENTRYSTART + i]));
1875                                PXDlength(pxd,
1876                                        lengthXAD(&p->xad[XTENTRYSTART + i]));
1877                                pxd++;
1878                        }
1879                } else {
1880                        /*
1881                         * xdlist will point into the inode's xtree; ensure
1882                         * that the transaction is not committed lazily.
1883                         */
1884                        xadlock->flag = mlckFREEXADLIST;
1885                        xadlock->xdlist = &p->xad[XTENTRYSTART];
1886                        tblk->xflag &= ~COMMIT_LAZY;
1887                }
1888                jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
1889                         tlck->ip, mp, xadlock->count);
1890
1891                maplock->index = 1;
1892
1893                /* mark page as invalid */
1894                if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
1895                    && !(tlck->type & tlckBTROOT))
1896                        tlck->flag |= tlckFREEPAGE;
1897                /*
1898                   else (tblk->xflag & COMMIT_PMAP)
1899                   ? release the page;
1900                 */
1901                return;
1902        }
1903
1904        /*
1905         *      page/entry truncation: file truncation (ref. xtTruncate())
1906         *
1907         *      |----------+------+------+---------------|
1908         *                 |      |      |
1909         *                 |      |     hwm - hwm before truncation
1910         *                 |     next - truncation point
1911         *                lwm - lwm before truncation
1912         * header ?
1913         */
1914        if (tlck->type & tlckTRUNCATE) {
1915                pxd_t pxd;      /* truncated extent of xad */
1916                int twm;
1917
1918                /*
1919                 * For truncation the entire linelock may be used, so it would
1920                 * be difficult to store xad list in linelock itself.
1921                 * Therefore, we'll just force transaction to be committed
1922                 * synchronously, so that xtree pages won't be changed before
1923                 * txUpdateMap runs.
1924                 */
1925                tblk->xflag &= ~COMMIT_LAZY;
1926                lwm = xtlck->lwm.offset;
1927                if (lwm == 0)
1928                        lwm = XTPAGEMAXSLOT;
1929                hwm = xtlck->hwm.offset;
1930                twm = xtlck->twm.offset;
1931
1932                /*
1933                 *      write log records
1934                 */
1935                /* log after-image for logredo():
1936                 *
1937                 * logredo() will update bmap for alloc of new/extended
1938                 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
1939                 * after-image of XADlist;
1940                 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
1941                 * applying the after-image to the meta-data page.
1942                 */
1943                lrd->type = cpu_to_le16(LOG_REDOPAGE);
1944                PXDaddress(page_pxd, mp->index);
1945                PXDlength(page_pxd,
1946                          mp->logical_size >> tblk->sb->s_blocksize_bits);
1947                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1948
1949                /*
1950                 * truncate entry XAD[twm == next - 1]:
1951                 */
1952                if (twm == next - 1) {
1953                        /* init LOG_UPDATEMAP for logredo() to update bmap for
1954                         * free of truncated delta extent of the truncated
1955                         * entry XAD[next - 1]:
1956                         * (xtlck->pxdlock = truncated delta extent);
1957                         */
1958                        pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
1959                        /* assert(pxdlock->type & tlckTRUNCATE); */
1960                        lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1961                        lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
1962                        lrd->log.updatemap.nxd = cpu_to_le16(1);
1963                        lrd->log.updatemap.pxd = pxdlock->pxd;
1964                        pxd = pxdlock->pxd;     /* save to format maplock */
1965                        lrd->backchain =
1966                            cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1967                }
1968
1969                /*
1970                 * free entries XAD[next:hwm]:
1971                 */
1972                if (hwm >= next) {
1973                        /* init LOG_UPDATEMAP of the freed extents
1974                         * XAD[next:hwm] from the deleted page itself
1975                         * for logredo() to update bmap;
1976                         */
1977                        lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1978                        lrd->log.updatemap.type =
1979                            cpu_to_le16(LOG_FREEXADLIST);
1980                        xtlck = (struct xtlock *) & tlck->lock;
1981                        hwm = xtlck->hwm.offset;
1982                        lrd->log.updatemap.nxd =
1983                            cpu_to_le16(hwm - next + 1);
1984                        /* reformat linelock for lmLog() */
1985                        xtlck->header.offset = next;
1986                        xtlck->header.length = hwm - next + 1;
1987                        xtlck->index = 1;
1988                        lrd->backchain =
1989                            cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1990                }
1991
1992                /*
1993                 *      format maplock(s) for txUpdateMap() to update bmap
1994                 */
1995                maplock->index = 0;
1996
1997                /*
1998                 * allocate entries XAD[lwm:next):
1999                 */
2000                if (lwm < next) {
2001                        /* format a maplock for txUpdateMap() to update bPMAP
2002                         * for alloc of new/extended extents of XAD[lwm:next)
2003                         * from the page itself;
2004                         * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
2005                         */
2006                        tlck->flag |= tlckUPDATEMAP;
2007                        xadlock->flag = mlckALLOCXADLIST;
2008                        xadlock->count = next - lwm;
2009                        xadlock->xdlist = &p->xad[lwm];
2010
2011                        jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d",
2012                                 tlck->ip, mp, xadlock->count, lwm, next);
2013                        maplock->index++;
2014                        xadlock++;
2015                }
2016
2017                /*
2018                 * truncate entry XAD[twm == next - 1]:
2019                 */
2020                if (twm == next - 1) {
2021                        /* format a maplock for txUpdateMap() to update bmap
2022                         * to free truncated delta extent of the truncated
2023                         * entry XAD[next - 1];
2024                         * (xtlck->pxdlock = truncated delta extent);
2025                         */
2026                        tlck->flag |= tlckUPDATEMAP;
2027                        pxdlock = (struct pxd_lock *) xadlock;
2028                        pxdlock->flag = mlckFREEPXD;
2029                        pxdlock->count = 1;
2030                        pxdlock->pxd = pxd;
2031
2032                        jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d",
2033                                 ip, mp, pxdlock->count, hwm);
2034                        maplock->index++;
2035                        xadlock++;
2036                }
2037
2038                /*
2039                 * free entries XAD[next:hwm]:
2040                 */
2041                if (hwm >= next) {
2042                        /* format a maplock for txUpdateMap() to update bmap
2043                 * to free extents of XAD[next:hwm] from the deleted
2044                         * page itself;
2045                         */
2046                        tlck->flag |= tlckUPDATEMAP;
2047                        xadlock->flag = mlckFREEXADLIST;
2048                        xadlock->count = hwm - next + 1;
2049                        xadlock->xdlist = &p->xad[next];
2050
2051                        jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d",
2052                                 tlck->ip, mp, xadlock->count, next, hwm);
2053                        maplock->index++;
2054                }
2055
2056                /* mark page as homeward bound */
2057                tlck->flag |= tlckWRITEPAGE;
2058        }
2059        return;
2060}
2061
2062/*
2063 *      mapLog()
2064 *
2065 * function:    log from maplock of freed data extents;
2066 */
2067static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2068                   struct tlock * tlck)
2069{
2070        struct pxd_lock *pxdlock;
2071        int i, nlock;
2072        pxd_t *pxd;
2073
2074        /*
2075         *      page relocation: free the source page extent
2076         *
2077         * a maplock for txUpdateMap() for free of the page
2078         * has been formatted at txLock() time saving the src
2079         * relocated page address;
2080         */
2081        if (tlck->type & tlckRELOCATE) {
2082                /* log LOG_NOREDOPAGE of the old relocated page
2083                 * for logredo() to start NoRedoPage filter;
2084                 */
2085                lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
2086                pxdlock = (struct pxd_lock *) & tlck->lock;
2087                pxd = &lrd->log.redopage.pxd;
2088                *pxd = pxdlock->pxd;
2089                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2090
2091                /* (N.B. currently, logredo() does NOT update bmap
2092                 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
2093                 * if page free from relocation, LOG_UPDATEMAP log is
2094                 * specifically generated now for logredo()
2095                 * to update bmap for free of src relocated page;
2096                 * (new flag LOG_RELOCATE may be introduced which will
2097                 * inform logredo() to start NORedoPage filter and also
2098                 * update block allocation map at the same time, thus
2099                 * avoiding an extra log write);
2100                 */
2101                lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2102                lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
2103                lrd->log.updatemap.nxd = cpu_to_le16(1);
2104                lrd->log.updatemap.pxd = pxdlock->pxd;
2105                lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2106
2107                /* a maplock for txUpdateMap() for free of the page
2108                 * has been formatted at txLock() time;
2109                 */
2110                tlck->flag |= tlckUPDATEMAP;
2111                return;
2112        }
2113        /*
2114         *
2115         * Otherwise it's not a relocate request
2116         *
2117         */
2118        else {
2119                /* log LOG_UPDATEMAP for logredo() to update bmap for
2120                 * free of truncated/relocated delta extent of the data;
2121                 * e.g.: external EA extent, relocated/truncated extent
2122                 * from xtTailgate();
2123                 */
2124                lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2125                pxdlock = (struct pxd_lock *) & tlck->lock;
2126                nlock = pxdlock->index;
2127                for (i = 0; i < nlock; i++, pxdlock++) {
2128                        if (pxdlock->flag & mlckALLOCPXD)
2129                                lrd->log.updatemap.type =
2130                                    cpu_to_le16(LOG_ALLOCPXD);
2131                        else
2132                                lrd->log.updatemap.type =
2133                                    cpu_to_le16(LOG_FREEPXD);
2134                        lrd->log.updatemap.nxd = cpu_to_le16(1);
2135                        lrd->log.updatemap.pxd = pxdlock->pxd;
2136                        lrd->backchain =
2137                            cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2138                        jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
2139                                 (ulong) addressPXD(&pxdlock->pxd),
2140                                 lengthPXD(&pxdlock->pxd));
2141                }
2142
2143                /* update bmap */
2144                tlck->flag |= tlckUPDATEMAP;
2145        }
2146}
2147
2148/*
2149 *      txEA()
2150 *
2151 * function:    acquire maplock for EA/ACL extents or
2152 *              set COMMIT_INLINE flag;
2153 */
2154void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
2155{
2156        struct tlock *tlck = NULL;
2157        struct pxd_lock *maplock = NULL, *pxdlock = NULL;
2158
2159        /*
2160         * format maplock for alloc of new EA extent
2161         */
2162        if (newea) {
2163                /* Since the newea could be a completely zeroed entry we need to
2164                 * check for the two flags which indicate we should actually
2165                 * commit new EA data
2166                 */
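                /* DXD_EXTENT: the EA lives in its own disk extent, so a
                 * maplock is needed to commit its allocation;
                 * DXD_INLINE: the EA data is stored inside the inode, so
                 * only the COMMIT_Inlineea flag needs to be set.
                 */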
2167                if (newea->flag & DXD_EXTENT) {
2168                        tlck = txMaplock(tid, ip, tlckMAP);
2169                        maplock = (struct pxd_lock *) & tlck->lock;
2170                        pxdlock = (struct pxd_lock *) maplock;
2171                        pxdlock->flag = mlckALLOCPXD;
2172                        PXDaddress(&pxdlock->pxd, addressDXD(newea));
2173                        PXDlength(&pxdlock->pxd, lengthDXD(newea));
2174                        pxdlock++;
2175                        maplock->index = 1;
2176                } else if (newea->flag & DXD_INLINE) {
2177                        tlck = NULL;
2178
2179                        set_cflag(COMMIT_Inlineea, ip);
2180                }
2181        }
2182
2183        /*
2184         * format maplock for free of old EA extent
2185         */
2186        if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
2187                if (tlck == NULL) {
2188                        tlck = txMaplock(tid, ip, tlckMAP);
2189                        maplock = (struct pxd_lock *) & tlck->lock;
2190                        pxdlock = (struct pxd_lock *) maplock;
2191                        maplock->index = 0;
2192                }
2193                pxdlock->flag = mlckFREEPXD;
2194                PXDaddress(&pxdlock->pxd, addressDXD(oldea));
2195                PXDlength(&pxdlock->pxd, lengthDXD(oldea));
2196                maplock->index++;
2197        }
2198}
2199
2200/*
2201 *      txForce()
2202 *
2203 * function: synchronously write pages locked by transaction
2204 *           after txLog() but before txUpdateMap();
2205 */
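/*
 * note: txForce() is reached from txCommit() only for transactions
 * carrying COMMIT_FORCE (e.g. inode allocation map updates), so the
 * synchronous writes below apply to forced commits only.
 */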
2206static void txForce(struct tblock * tblk)
2207{
2208        struct tlock *tlck;
2209        lid_t lid, next;
2210        struct metapage *mp;
2211
2212        /*
2213         * reverse the order of transaction tlocks to obtain the
2214         * careful update order of address index pages
2215         * (right to left, bottom up)
2216         */
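        /*
         * e.g. an initial chain tblk->next: A -> B -> C ends up as
         * C -> B -> A after the loop below (a classic singly-linked
         * list reversal through the lid/next fields).
         */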
2217        tlck = lid_to_tlock(tblk->next);
2218        lid = tlck->next;
2219        tlck->next = 0;
2220        while (lid) {
2221                tlck = lid_to_tlock(lid);
2222                next = tlck->next;
2223                tlck->next = tblk->next;
2224                tblk->next = lid;
2225                lid = next;
2226        }
2227
2228        /*
2229         * synchronously write the page, and
2230         * hold the page for txUpdateMap();
2231         */
2232        for (lid = tblk->next; lid; lid = next) {
2233                tlck = lid_to_tlock(lid);
2234                next = tlck->next;
2235
2236                if ((mp = tlck->mp) != NULL &&
2237                    (tlck->type & tlckBTROOT) == 0) {
2238                        assert(mp->xflag & COMMIT_PAGE);
2239
2240                        if (tlck->flag & tlckWRITEPAGE) {
2241                                tlck->flag &= ~tlckWRITEPAGE;
2242
2243                                /* do not release page to freelist */
2244                                force_metapage(mp);
2245#if 0
2246                                /*
2247                                 * The "right" thing to do here is to
2248                                 * synchronously write the metadata.
2249                                 * With the current implementation this
2250                                 * is hard since write_metapage requires
2251                                 * us to kunmap & remap the page.  If we
2252                                 * have tlocks pointing into the metadata
2253                                 * pages, we don't want to do this.  I think
2254                                 * we can get by with synchronously writing
2255                                 * the pages when they are released.
2256                                 */
2257                                assert(mp->nohomeok);
2258                                set_bit(META_dirty, &mp->flag);
2259                                set_bit(META_sync, &mp->flag);
2260#endif
2261                        }
2262                }
2263        }
2264}
2265
2266/*
2267 *      txUpdateMap()
2268 *
2269 * function:    update persistent allocation map (and working map
2270 *              if appropriate);
2271 *
2272 * parameter:
2273 */
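/*
 * note: the persistent map (pmap) tracks the committed on-disk allocation
 * state that logredo() relies on, while the working map (wmap) is the
 * in-memory state used by the allocator; COMMIT_PWMAP updates both here,
 * whereas COMMIT_PMAP updates only the persistent map and defers the
 * working-map free to the last reference release of the inode (see the
 * comments in the loop below).
 */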
2274static void txUpdateMap(struct tblock * tblk)
2275{
2276        struct inode *ip;
2277        struct inode *ipimap;
2278        lid_t lid;
2279        struct tlock *tlck;
2280        struct maplock *maplock;
2281        struct pxd_lock pxdlock;
2282        int maptype;
2283        int k, nlock;
2284        struct metapage *mp = NULL;
2285
2286        ipimap = JFS_SBI(tblk->sb)->ipimap;
2287
2288        maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
2289
2290
2291        /*
2292         *      update block allocation map
2293         *
2294         * update allocation state in pmap (and wmap) and
2295         * update lsn of the pmap page;
2296         */
2297        /*
2298         * scan each tlock/page of transaction for block allocation/free:
2299         *
2300         * for each tlock/page of transaction, update map.
2301         *  ? are there tlocks for pmap and pwmap at the same time ?
2302         */
2303        for (lid = tblk->next; lid; lid = tlck->next) {
2304                tlck = lid_to_tlock(lid);
2305
2306                if ((tlck->flag & tlckUPDATEMAP) == 0)
2307                        continue;
2308
2309                if (tlck->flag & tlckFREEPAGE) {
2310                        /*
2311                         * Another thread may attempt to reuse freed space
2312                         * immediately, so we want to get rid of the metapage
2313                         * before anyone else has a chance to get it.
2314                         * Lock metapage, update maps, then invalidate
2315                         * the metapage.
2316                         */
2317                        mp = tlck->mp;
2318                        ASSERT(mp->xflag & COMMIT_PAGE);
2319                        grab_metapage(mp);
2320                }
2321
2322                /*
2323                 * extent list:
2324                 * . in-line PXD list:
2325                 * . out-of-line XAD list:
2326                 */
2327                maplock = (struct maplock *) & tlck->lock;
2328                nlock = maplock->index;
2329
2330                for (k = 0; k < nlock; k++, maplock++) {
2331                        /*
2332                         * allocate blocks in persistent map:
2333                         *
2334                         * blocks have been allocated from wmap at alloc time;
2335                         */
2336                        if (maplock->flag & mlckALLOC) {
2337                                txAllocPMap(ipimap, maplock, tblk);
2338                        }
2339                        /*
2340                         * free blocks in persistent and working map:
2341                         * blocks will be freed in pmap and then in wmap;
2342                         *
2343                         * ? tblock specifies the PMAP/PWMAP based upon
2344                         * transaction
2345                         *
2346                         * free blocks in persistent map:
2347                         * blocks will be freed from wmap at last reference
2348                         * release of the object for regular files;
2349                         *
2350                         * Always free blocks from both persistent & working
2351                         * maps for directories
2352                         */
2353                        else {  /* (maplock->flag & mlckFREE) */
2354
2355                                if (tlck->flag & tlckDIRECTORY)
2356                                        txFreeMap(ipimap, maplock,
2357                                                  tblk, COMMIT_PWMAP);
2358                                else
2359                                        txFreeMap(ipimap, maplock,
2360                                                  tblk, maptype);
2361                        }
2362                }
2363                if (tlck->flag & tlckFREEPAGE) {
2364                        if (!(tblk->flag & tblkGC_LAZY)) {
2365                                /* This is equivalent to txRelease */
2366                                ASSERT(mp->lid == lid);
2367                                tlck->mp->lid = 0;
2368                        }
2369                        assert(mp->nohomeok == 1);
2370                        metapage_homeok(mp);
2371                        discard_metapage(mp);
2372                        tlck->mp = NULL;
2373                }
2374        }
2375        /*
2376         *      update inode allocation map
2377         *
2378         * update allocation state in pmap and
2379         * update lsn of the pmap page;
2380         * update in-memory inode flag/state
2381         *
2382         * unlock mapper/write lock
2383         */
2384        if (tblk->xflag & COMMIT_CREATE) {
2385                diUpdatePMap(ipimap, tblk->ino, false, tblk);
2386                /* update persistent block allocation map
2387                 * for the allocation of inode extent;
2388                 */
2389                pxdlock.flag = mlckALLOCPXD;
2390                pxdlock.pxd = tblk->u.ixpxd;
2391                pxdlock.index = 1;
2392                txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
2393        } else if (tblk->xflag & COMMIT_DELETE) {
2394                ip = tblk->u.ip;
2395                diUpdatePMap(ipimap, ip->i_ino, true, tblk);
2396                iput(ip);
2397        }
2398}
2399
2400/*
2401 *      txAllocPMap()
2402 *
2403 * function: allocate from persistent map;
2404 *
2405 * parameter:
2406 *      ipbmap  -
2407 *      malock  -
2408 *              xad list:
2409 *              pxd:
2410 *
2411 *      maptype -
2412 *              allocate from persistent map;
2413 *              free from persistent map;
2414 *              (e.g., tmp file - free from working map at release
2415 *               of last reference);
2416 *              free from persistent and working map;
2417 *
2418 *      lsn     - log sequence number;
2419 */
2420static void txAllocPMap(struct inode *ip, struct maplock * maplock,
2421                        struct tblock * tblk)
2422{
2423        struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2424        struct xdlistlock *xadlistlock;
2425        xad_t *xad;
2426        s64 xaddr;
2427        int xlen;
2428        struct pxd_lock *pxdlock;
2429        struct xdlistlock *pxdlistlock;
2430        pxd_t *pxd;
2431        int n;
2432
2433        /*
2434         * allocate from persistent map;
2435         */
2436        if (maplock->flag & mlckALLOCXADLIST) {
2437                xadlistlock = (struct xdlistlock *) maplock;
2438                xad = xadlistlock->xdlist;
2439                for (n = 0; n < xadlistlock->count; n++, xad++) {
2440                        if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
2441                                xaddr = addressXAD(xad);
2442                                xlen = lengthXAD(xad);
2443                                dbUpdatePMap(ipbmap, false, xaddr,
2444                                             (s64) xlen, tblk);
2445                                xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
2446                                jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2447                                         (ulong) xaddr, xlen);
2448                        }
2449                }
2450        } else if (maplock->flag & mlckALLOCPXD) {
2451                pxdlock = (struct pxd_lock *) maplock;
2452                xaddr = addressPXD(&pxdlock->pxd);
2453                xlen = lengthPXD(&pxdlock->pxd);
2454                dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
2455                jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
2456        } else {                /* (maplock->flag & mlckALLOCPXDLIST) */
2457
2458                pxdlistlock = (struct xdlistlock *) maplock;
2459                pxd = pxdlistlock->xdlist;
2460                for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2461                        xaddr = addressPXD(pxd);
2462                        xlen = lengthPXD(pxd);
2463                        dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen,
2464                                     tblk);
2465                        jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2466                                 (ulong) xaddr, xlen);
2467                }
2468        }
2469}
2470
2471/*
2472 *      txFreeMap()
2473 *
2474 * function:    free from persistent and/or working map;
2475 *
2476 * todo: optimization
2477 */
2478void txFreeMap(struct inode *ip,
2479               struct maplock * maplock, struct tblock * tblk, int maptype)
2480{
2481        struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2482        struct xdlistlock *xadlistlock;
2483        xad_t *xad;
2484        s64 xaddr;
2485        int xlen;
2486        struct pxd_lock *pxdlock;
2487        struct xdlistlock *pxdlistlock;
2488        pxd_t *pxd;
2489        int n;
2490
2491        jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2492                 tblk, maplock, maptype);
2493
2494        /*
2495         * free from persistent map;
2496         */
2497        if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
2498                if (maplock->flag & mlckFREEXADLIST) {
2499                        xadlistlock = (struct xdlistlock *) maplock;
2500                        xad = xadlistlock->xdlist;
2501                        for (n = 0; n < xadlistlock->count; n++, xad++) {
2502                                if (!(xad->flag & XAD_NEW)) {
2503                                        xaddr = addressXAD(xad);
2504                                        xlen = lengthXAD(xad);
2505                                        dbUpdatePMap(ipbmap, true, xaddr,
2506                                                     (s64) xlen, tblk);
2507                                        jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2508                                                 (ulong) xaddr, xlen);
2509                                }
2510                        }
2511                } else if (maplock->flag & mlckFREEPXD) {
2512                        pxdlock = (struct pxd_lock *) maplock;
2513                        xaddr = addressPXD(&pxdlock->pxd);
2514                        xlen = lengthPXD(&pxdlock->pxd);
2515                        dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen,
2516                                     tblk);
2517                        jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2518                                 (ulong) xaddr, xlen);
2519                } else {        /* (maplock->flag & mlckFREEPXDLIST) */
2520
2521                        pxdlistlock = (struct xdlistlock *) maplock;
2522                        pxd = pxdlistlock->xdlist;
2523                        for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2524                                xaddr = addressPXD(pxd);
2525                                xlen = lengthPXD(pxd);
2526                                dbUpdatePMap(ipbmap, true, xaddr,
2527                                             (s64) xlen, tblk);
2528                                jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2529                                         (ulong) xaddr, xlen);
2530                        }
2531                }
2532        }
2533
2534        /*
2535         * free from working map;
2536         */
2537        if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
2538                if (maplock->flag & mlckFREEXADLIST) {
2539                        xadlistlock = (struct xdlistlock *) maplock;
2540                        xad = xadlistlock->xdlist;
2541                        for (n = 0; n < xadlistlock->count; n++, xad++) {
2542                                xaddr = addressXAD(xad);
2543                                xlen = lengthXAD(xad);
2544                                dbFree(ip, xaddr, (s64) xlen);
2545                                xad->flag = 0;
2546                                jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2547                                         (ulong) xaddr, xlen);
2548                        }
2549                } else if (maplock->flag & mlckFREEPXD) {
2550                        pxdlock = (struct pxd_lock *) maplock;
2551                        xaddr = addressPXD(&pxdlock->pxd);
2552                        xlen = lengthPXD(&pxdlock->pxd);
2553                        dbFree(ip, xaddr, (s64) xlen);
2554                        jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2555                                 (ulong) xaddr, xlen);
2556                } else {        /* (maplock->flag & mlckFREEPXDLIST) */
2557
2558                        pxdlistlock = (struct xdlistlock *) maplock;
2559                        pxd = pxdlistlock->xdlist;
2560                        for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2561                                xaddr = addressPXD(pxd);
2562                                xlen = lengthPXD(pxd);
2563                                dbFree(ip, xaddr, (s64) xlen);
2564                                jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2565                                         (ulong) xaddr, xlen);
2566                        }
2567                }
2568        }
2569}
2570
2571/*
2572 *      txFreelock()
2573 *
2574 * function:    remove freed tlocks (tlckFREELOCK) from inode anonymous locklist
2575 */
2576void txFreelock(struct inode *ip)
2577{
2578        struct jfs_inode_info *jfs_ip = JFS_IP(ip);
2579        struct tlock *xtlck, *tlck;
2580        lid_t xlid = 0, lid;
2581
2582        if (!jfs_ip->atlhead)
2583                return;
2584
2585        TXN_LOCK();
2586        xtlck = (struct tlock *) &jfs_ip->atlhead;
2587
2588        while ((lid = xtlck->next) != 0) {
2589                tlck = lid_to_tlock(lid);
2590                if (tlck->flag & tlckFREELOCK) {
2591                        xtlck->next = tlck->next;
2592                        txLockFree(lid);
2593                } else {
2594                        xtlck = tlck;
2595                        xlid = lid;
2596                }
2597        }
2598
2599        if (jfs_ip->atlhead)
2600                jfs_ip->atltail = xlid;
2601        else {
2602                jfs_ip->atltail = 0;
2603                /*
2604                 * If inode was on anon_list, remove it
2605                 */
2606                list_del_init(&jfs_ip->anon_inode_list);
2607        }
2608        TXN_UNLOCK();
2609}
2610
2611/*
2612 *      txAbort()
2613 *
2614 * function: abort tx before commit;
2615 *
2616 * frees line-locks and segment locks for all
2617 * segments in comdata structure.
2618 * Optionally sets state of file-system to FM_DIRTY in super-block.
2619 * The log age of in-memory page-frames held by the transaction
2620 * is reset to 0 (to avoid logwrap).
2621 */
2622void txAbort(tid_t tid, int dirty)
2623{
2624        lid_t lid, next;
2625        struct metapage *mp;
2626        struct tblock *tblk = tid_to_tblock(tid);
2627        struct tlock *tlck;
2628
2629        /*
2630         * free tlocks of the transaction
2631         */
2632        for (lid = tblk->next; lid; lid = next) {
2633                tlck = lid_to_tlock(lid);
2634                next = tlck->next;
2635                mp = tlck->mp;
2636                JFS_IP(tlck->ip)->xtlid = 0;
2637
2638                if (mp) {
2639                        mp->lid = 0;
2640
2641                        /*
2642                         * reset lsn of page to avoid logwrap:
2643                         *
2644                         * (page may have been previously committed by another
2645                         * transaction(s) but has not been paged, i.e.,
2646                         * it may be on logsync list even though it has not
2647                         * been logged for the current tx.)
2648                         */
2649                        if (mp->xflag & COMMIT_PAGE && mp->lsn)
2650                                LogSyncRelease(mp);
2651                }
2652                /* insert tlock at head of freelist */
2653                TXN_LOCK();
2654                txLockFree(lid);
2655                TXN_UNLOCK();
2656        }
2657
2658        /* caller will free the transaction block */
2659
2660        tblk->next = tblk->last = 0;
2661
2662        /*
2663         * mark filesystem dirty
2664         */
2665        if (dirty)
2666                jfs_error(tblk->sb, "\n");
2667
2668        return;
2669}
2670
2671/*
2672 *      txLazyCommit(void)
2673 *
2674 *      All transactions except those changing ipimap (COMMIT_FORCE) are
2675 *      processed by this routine.  This ensures that the inode and block
2676 *      allocation maps are updated in order.  For synchronous transactions,
2677 *      let the user thread finish processing after txUpdateMap() is called.
2678 */
2679static void txLazyCommit(struct tblock * tblk)
2680{
2681        struct jfs_log *log;
2682
2683        while (((tblk->flag & tblkGC_READY) == 0) &&
2684               ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2685                /* We must have gotten ahead of the user thread
2686                 */
2687                jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2688                yield();
2689        }
2690
2691        jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2692
2693        txUpdateMap(tblk);
2694
2695        log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2696
2697        spin_lock_irq(&log->gclock);    // LOGGC_LOCK
2698
2699        tblk->flag |= tblkGC_COMMITTED;
2700
2701        if (tblk->flag & tblkGC_READY)
2702                log->gcrtc--;
2703
2704        wake_up_all(&tblk->gcwait);     // LOGGC_WAKEUP
2705
2706        /*
2707         * Can't release log->gclock until we've tested tblk->flag
2708         */
2709        if (tblk->flag & tblkGC_LAZY) {
2710                spin_unlock_irq(&log->gclock);  // LOGGC_UNLOCK
2711                txUnlock(tblk);
2712                tblk->flag &= ~tblkGC_LAZY;
2713                txEnd(tblk - TxBlock);  /* Convert back to tid */
2714        } else
2715                spin_unlock_irq(&log->gclock);  // LOGGC_UNLOCK
2716
2717        jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
2718}
2719
2720/*
2721 *      jfs_lazycommit(void)
2722 *
2723 *      To be run as a kernel daemon.  If lbmIODone is called in an interrupt
2724 *      context, or where blocking is not wanted, this routine will process
2725 *      committed transactions from the unlock queue.
2726 */
2727int jfs_lazycommit(void *arg)
2728{
2729        int WorkDone;
2730        struct tblock *tblk;
2731        unsigned long flags;
2732        struct jfs_sb_info *sbi;
2733
2734        do {
2735                LAZY_LOCK(flags);
2736                jfs_commit_thread_waking = 0;   /* OK to wake another thread */
2737                while (!list_empty(&TxAnchor.unlock_queue)) {
2738                        WorkDone = 0;
2739                        list_for_each_entry(tblk, &TxAnchor.unlock_queue,
2740                                            cqueue) {
2741
2742                                sbi = JFS_SBI(tblk->sb);
2743                                /*
2744                                 * For each volume, the transactions must be
2745                                 * handled in order.  If another commit thread
2746                                 * is handling a tblk for this superblock,
2747                                 * skip it
2748                                 */
2749                                if (sbi->commit_state & IN_LAZYCOMMIT)
2750                                        continue;
2751
2752                                sbi->commit_state |= IN_LAZYCOMMIT;
2753                                WorkDone = 1;
2754
2755                                /*
2756                                 * Remove transaction from queue
2757                                 */
2758                                list_del(&tblk->cqueue);
2759
2760                                LAZY_UNLOCK(flags);
2761                                txLazyCommit(tblk);
2762                                LAZY_LOCK(flags);
2763
2764                                sbi->commit_state &= ~IN_LAZYCOMMIT;
2765                                /*
2766                                 * Don't continue in the for loop.  (We can't
2767                                 * anyway, it's unsafe!)  We want to go back to
2768                                 * the beginning of the list.
2769                                 */
2770                                break;
2771                        }
2772
2773                        /* If there was nothing to do, don't continue */
2774                        if (!WorkDone)
2775                                break;
2776                }
2777                /* In case a wakeup came while all threads were active */
2778                jfs_commit_thread_waking = 0;
2779
2780                if (freezing(current)) {
2781                        LAZY_UNLOCK(flags);
2782                        try_to_freeze();
2783                } else {
2784                        DECLARE_WAITQUEUE(wq, current);
2785
2786                        add_wait_queue(&jfs_commit_thread_wait, &wq);
2787                        set_current_state(TASK_INTERRUPTIBLE);
2788                        LAZY_UNLOCK(flags);
2789                        schedule();
2790                        remove_wait_queue(&jfs_commit_thread_wait, &wq);
2791                }
2792        } while (!kthread_should_stop());
2793
2794        if (!list_empty(&TxAnchor.unlock_queue))
2795                jfs_err("jfs_lazycommit being killed w/pending transactions!");
2796        else
2797                jfs_info("jfs_lazycommit being killed");
2798        return 0;
2799}
2800
2801void txLazyUnlock(struct tblock * tblk)
2802{
2803        unsigned long flags;
2804
2805        LAZY_LOCK(flags);
2806
2807        list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
2808        /*
2809         * Don't wake up a commit thread if there is already one servicing
2810         * this superblock, or if the last one we woke up hasn't started yet.
2811         */
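        /* (jfs_commit_thread_waking is cleared again by jfs_lazycommit()
         * once a woken thread starts scanning the unlock queue)
         */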
2812        if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
2813            !jfs_commit_thread_waking) {
2814                jfs_commit_thread_waking = 1;
2815                wake_up(&jfs_commit_thread_wait);
2816        }
2817        LAZY_UNLOCK(flags);
2818}
2819
2820static void LogSyncRelease(struct metapage * mp)
2821{
2822        struct jfs_log *log = mp->log;
2823
2824        assert(mp->nohomeok);
2825        assert(log);
2826        metapage_homeok(mp);
2827}
2828
2829/*
2830 *      txQuiesce
2831 *
2832 *      Block all new transactions and push anonymous transactions to
2833 *      completion
2834 *
2835 *      This does almost the same thing as jfs_sync below.  We don't
2836 *      worry about deadlocking when jfs_tlocks_low is set, since we would
2837 *      expect jfs_sync to get us out of that jam.
2838 */
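/*
 * note: the log_QUIESCE bit set here makes new txBegin() callers wait on
 * log->syncwait until txResume() clears the bit and wakes them up (see
 * txResume() below).
 */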
2839void txQuiesce(struct super_block *sb)
2840{
2841        struct inode *ip;
2842        struct jfs_inode_info *jfs_ip;
2843        struct jfs_log *log = JFS_SBI(sb)->log;
2844        tid_t tid;
2845
2846        set_bit(log_QUIESCE, &log->flag);
2847
2848        TXN_LOCK();
2849restart:
2850        while (!list_empty(&TxAnchor.anon_list)) {
2851                jfs_ip = list_entry(TxAnchor.anon_list.next,
2852                                    struct jfs_inode_info,
2853                                    anon_inode_list);
2854                ip = &jfs_ip->vfs_inode;
2855
2856                /*
2857                 * inode will be removed from anonymous list
2858                 * when it is committed
2859                 */
2860                TXN_UNLOCK();
2861                tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2862                mutex_lock(&jfs_ip->commit_mutex);
2863                txCommit(tid, 1, &ip, 0);
2864                txEnd(tid);
2865                mutex_unlock(&jfs_ip->commit_mutex);
2866                /*
2867                 * Just to be safe.  I don't know how
2868                 * long we can run without blocking
2869                 */
2870                cond_resched();
2871                TXN_LOCK();
2872        }
2873
2874        /*
2875         * If jfs_sync is running in parallel, there could be some inodes
2876         * on anon_list2.  Let's check.
2877         */
2878        if (!list_empty(&TxAnchor.anon_list2)) {
2879                list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2880                goto restart;
2881        }
2882        TXN_UNLOCK();
2883
2884        /*
2885         * We may need to kick off the group commit
2886         */
2887        jfs_flush_journal(log, 0);
2888}
2889
2890/*
2891 * txResume()
2892 *
2893 * Allows transactions to start again following txQuiesce
2894 */
2895void txResume(struct super_block *sb)
2896{
2897        struct jfs_log *log = JFS_SBI(sb)->log;
2898
2899        clear_bit(log_QUIESCE, &log->flag);
2900        TXN_WAKEUP(&log->syncwait);
2901}
2902
2903/*
2904 *      jfs_sync(void)
2905 *
2906 *      To be run as a kernel daemon.  This is awakened when tlocks run low.
2907 *      We write any inodes that have anonymous tlocks so they will become
2908 *      available.
2909 */
2910int jfs_sync(void *arg)
2911{
2912        struct inode *ip;
2913        struct jfs_inode_info *jfs_ip;
2914        tid_t tid;
2915
2916        do {
2917                /*
2918                 * write each inode on the anonymous inode list
2919                 */
2920                TXN_LOCK();
2921                while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
2922                        jfs_ip = list_entry(TxAnchor.anon_list.next,
2923                                            struct jfs_inode_info,
2924                                            anon_inode_list);
2925                        ip = &jfs_ip->vfs_inode;
2926
2927                        if (! igrab(ip)) {
2928                                /*
2929                                 * Inode is being freed
2930                                 */
2931                                list_del_init(&jfs_ip->anon_inode_list);
2932                        } else if (mutex_trylock(&jfs_ip->commit_mutex)) {
2933                                /*
2934                                 * inode will be removed from anonymous list
2935                                 * when it is committed
2936                                 */
2937                                TXN_UNLOCK();
2938                                tid = txBegin(ip->i_sb, COMMIT_INODE);
2939                                txCommit(tid, 1, &ip, 0);
2940                                txEnd(tid);
2941                                mutex_unlock(&jfs_ip->commit_mutex);
2942
2943                                iput(ip);
2944                                /*
2945                                 * Be safe and yield: we don't know how
2946                                 * long we can run without blocking
2947                                 */
2948                                cond_resched();
2949                                TXN_LOCK();
2950                        } else {
2951                                /* We can't get the commit mutex.  It may
2952                                 * be held by a thread waiting for tlocks,
2953                                 * so let's not block here.  Save it to
2954                                 * put back on the anon_list.
2955                                 */
2956
2957                                /* Move from anon_list to anon_list2 */
2958                                list_move(&jfs_ip->anon_inode_list,
2959                                          &TxAnchor.anon_list2);
2960
2961                                TXN_UNLOCK();
2962                                iput(ip);
2963                                TXN_LOCK();
2964                        }
2965                }
2966                /* Add anon_list2 back to anon_list */
2967                list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2968
2969                if (freezing(current)) {
2970                        TXN_UNLOCK();
2971                        try_to_freeze();
2972                } else {
2973                        set_current_state(TASK_INTERRUPTIBLE);
2974                        TXN_UNLOCK();
2975                        schedule();
2976                }
2977        } while (!kthread_should_stop());
2978
2979        jfs_info("jfs_sync being killed");
2980        return 0;
2981}
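/*
 * Editorial sketch (not part of jfs_txnmgr.c): how the jfs_sync kthread
 * is typically driven.  In mainline JFS the thread is created at module
 * init and woken once jfs_tlocks_low is set; the identifiers below
 * (example_sync_thread, example_*) are illustrative assumptions, not
 * this file's own names.
 */
static struct task_struct *example_sync_thread;

static int example_start_sync_thread(void)
{
	/* start the daemon; it sleeps in schedule() until it is woken */
	example_sync_thread = kthread_run(jfs_sync, NULL, "jfs_sync");
	if (IS_ERR(example_sync_thread))
		return PTR_ERR(example_sync_thread);
	return 0;
}

static void example_tlocks_low(void)
{
	/* flag the shortage, then kick the daemon out of its sleep */
	jfs_tlocks_low = 1;
	wake_up_process(example_sync_thread);
}

static void example_stop_sync_thread(void)
{
	/* kthread_should_stop() in jfs_sync() ends its loop */
	kthread_stop(example_sync_thread);
}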
2982
2983#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
2984int jfs_txanchor_proc_show(struct seq_file *m, void *v)
2985{
2986        char *freewait;
2987        char *freelockwait;
2988        char *lowlockwait;
2989
2990        freewait =
2991            waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
2992        freelockwait =
2993            waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
2994        lowlockwait =
2995            waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
2996
2997        seq_printf(m,
2998                       "JFS TxAnchor\n"
2999                       "============\n"
3000                       "freetid = %d\n"
3001                       "freewait = %s\n"
3002                       "freelock = %d\n"
3003                       "freelockwait = %s\n"
3004                       "lowlockwait = %s\n"
3005                       "tlocksInUse = %d\n"
3006                       "jfs_tlocks_low = %d\n"
3007                       "unlock_queue is %sempty\n",
3008                       TxAnchor.freetid,
3009                       freewait,
3010                       TxAnchor.freelock,
3011                       freelockwait,
3012                       lowlockwait,
3013                       TxAnchor.tlocksInUse,
3014                       jfs_tlocks_low,
3015                       list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
3016        return 0;
3017}
3018#endif
3019
3020#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
3021int jfs_txstats_proc_show(struct seq_file *m, void *v)
3022{
3023        seq_printf(m,
3024                       "JFS TxStats\n"
3025                       "===========\n"
3026                       "calls to txBegin = %d\n"
3027                       "txBegin blocked by sync barrier = %d\n"
3028                       "txBegin blocked by tlocks low = %d\n"
3029                       "txBegin blocked by no free tid = %d\n"
3030                       "calls to txBeginAnon = %d\n"
3031                       "txBeginAnon blocked by sync barrier = %d\n"
3032                       "txBeginAnon blocked by tlocks low = %d\n"
3033                       "calls to txLockAlloc = %d\n"
3034                       "txLockAlloc blocked by no free lock = %d\n",
3035                       TxStat.txBegin,
3036                       TxStat.txBegin_barrier,
3037                       TxStat.txBegin_lockslow,
3038                       TxStat.txBegin_freetid,
3039                       TxStat.txBeginAnon,
3040                       TxStat.txBeginAnon_barrier,
3041                       TxStat.txBeginAnon_lockslow,
3042                       TxStat.txLockAlloc,
3043                       TxStat.txLockAlloc_freelock);
3044        return 0;
3045}
3046#endif
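/*
 * Editorial sketch (not part of jfs_txnmgr.c): how the two show routines
 * above are typically exposed under /proc via proc_create_single().  In
 * mainline JFS this registration lives in fs/jfs/jfs_debug.c; the
 * directory argument and function name here are illustrative assumptions,
 * and each entry is only built when the corresponding option
 * (CONFIG_JFS_DEBUG / CONFIG_JFS_STATISTICS) is enabled.
 */
#include <linux/proc_fs.h>

static void example_register_txn_proc(struct proc_dir_entry *jfs_proc_dir)
{
	/* jfs_proc_dir is assumed to be the /proc/fs/jfs directory entry */
	proc_create_single("TxAnchor", 0, jfs_proc_dir,
			   jfs_txanchor_proc_show);
	proc_create_single("TxStats", 0, jfs_proc_dir,
			   jfs_txstats_proc_show);
}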
3047