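/*
 *	jfs_txnmgr.c: JFS transaction manager
 *
 * notes:
 * A transaction starts with txBegin()/txBeginAnon() and ends with
 * txCommit() or txAbort().  Transaction locks (tlocks) are acquired
 * on metapages while the transaction is active and are released at
 * commit time, either synchronously or by the lazy commit thread.
 */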
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_imap.h"
#include "jfs_dmap.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"

/*
 *	transaction management structures
 */
static struct {
	int freetid;		/* index of head of free tblock list */
	int freelock;		/* index of head of free tlock list */
	wait_queue_head_t freewait;	/* eventlist of free tblock */
	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
	int tlocksInUse;	/* number of tlocks in use */
	spinlock_t LazyLock;	/* serializes unlock_queue */

	struct list_head unlock_queue;	/* txns waiting to be released */
	struct list_head anon_list;	/* inodes having anonymous txns */
	struct list_head anon_list2;	/* inodes having anonymous txns
					   that couldn't be sync'ed */
} TxAnchor;

int jfs_tlocks_low;		/* nonzero when free tlocks are running low */

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint txBegin;
	uint txBegin_barrier;
	uint txBegin_lockslow;
	uint txBegin_freetid;
	uint txBeginAnon;
	uint txBeginAnon_barrier;
	uint txBeginAnon_lockslow;
	uint txLockAlloc;
	uint txLockAlloc_freelock;
} TxStat;
#endif

static int nTxBlock = -1;	/* number of transaction blocks */
module_param(nTxBlock, int, 0);
MODULE_PARM_DESC(nTxBlock,
		 "Number of transaction blocks (max:65536)");

static int nTxLock = -1;	/* number of transaction locks */
module_param(nTxLock, int, 0);
MODULE_PARM_DESC(nTxLock,
		 "Number of transaction locks (max:65536)");

struct tblock *TxBlock;	/* transaction block table */
static int TxLockLWM;	/* low water mark for number of tlocks in use */
static int TxLockHWM;	/* high water mark for number of tlocks in use */
static int TxLockVHWM;	/* very high water mark */
struct tlock *TxLock;	/* transaction lock table */

/*
 *	transaction management lock
 */
static DEFINE_SPINLOCK(jfsTxnLock);

#define TXN_LOCK()		spin_lock(&jfsTxnLock)
#define TXN_UNLOCK()		spin_unlock(&jfsTxnLock)

#define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
#define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags)	spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)

static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
static int jfs_commit_thread_waking;

/*
 * Retry logic exists around these functions: callers drop the
 * transaction lock while sleeping and retry after being woken.
 */
static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(event, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	TXN_UNLOCK();
	io_schedule();
	remove_wait_queue(event, &wait);
}

#define TXN_SLEEP(event)\
{\
	TXN_SLEEP_DROP_LOCK(event);\
	TXN_LOCK();\
}

#define TXN_WAKEUP(event) wake_up_all(event)

/*
 *	statistics
 */
static struct {
	tid_t maxtid;		/* biggest tid ever used */
	lid_t maxlid;		/* biggest lid ever used */
	int ntid;		/* # of transactions performed */
	int nlid;		/* # of lids in use */
	int waitlock;		/* # of tlock waits */
} stattx;

/*
 * forward references
 */
static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck, struct commit * cd);
static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
		struct tblock * tblk);
static void txForce(struct tblock * tblk);
static int txLog(struct jfs_log * log, struct tblock * tblk,
		struct commit * cd);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);
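
/*
 *	NAME:	txLockAlloc()/txLockFree()
 *
 * FUNCTION:	allocate/release a transaction lock (tlock) from the
 *	global free list.  txLockAlloc() sleeps until a tlock is free and
 *	wakes the sync thread once usage crosses the high water mark so
 *	that anonymous transactions get committed and their tlocks freed.
 *	Both routines are called with TXN_LOCK held.
 */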
static lid_t txLockAlloc(void)
{
	lid_t lid;

	INCREMENT(TxStat.txLockAlloc);
	if (!TxAnchor.freelock) {
		INCREMENT(TxStat.txLockAlloc_freelock);
	}

	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
	TxAnchor.freelock = TxLock[lid].next;
	HIGHWATERMARK(stattx.maxlid, lid);
	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
		jfs_info("txLockAlloc tlocks low");
		jfs_tlocks_low = 1;
		wake_up_process(jfsSyncThread);
	}

	return lid;
}

static void txLockFree(lid_t lid)
{
	TxLock[lid].tid = 0;
	TxLock[lid].next = TxAnchor.freelock;
	TxAnchor.freelock = lid;
	TxAnchor.tlocksInUse--;
	if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
		jfs_info("txLockFree jfs_tlocks_low no more");
		jfs_tlocks_low = 0;
		TXN_WAKEUP(&TxAnchor.lowlockwait);
	}
	TXN_WAKEUP(&TxAnchor.freelockwait);
}
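
/*
 *	NAME:	txInit()
 *
 * FUNCTION:	initialize transaction management structures: size and
 *	allocate the tblock and tlock tables, thread their free lists and
 *	set the tlock low/high water marks.
 *
 * RETURN:	0 on success, -ENOMEM if a table cannot be allocated
 */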
int txInit(void)
{
	int k, size;
	struct sysinfo si;

	/* Set defaults for nTxLock and nTxBlock if unset */

	if (nTxLock == -1) {
		if (nTxBlock == -1) {
			/* Base default on memory size */
			si_meminfo(&si);
			if (si.totalram > (256 * 1024)) /* 1 GB */
				nTxLock = 64 * 1024;
			else
				nTxLock = si.totalram >> 2;
		} else if (nTxBlock > (8 * 1024))
			nTxLock = 64 * 1024;
		else
			nTxLock = nTxBlock << 3;
	}
	if (nTxBlock == -1)
		nTxBlock = nTxLock >> 3;

	/* Verify tunable parameters */
	if (nTxBlock < 16)
		nTxBlock = 16;	/* No one should set it this low */
	if (nTxBlock > 65536)
		nTxBlock = 65536;
	if (nTxLock < 256)
		nTxLock = 256;	/* No one should set it this low */
	if (nTxLock > 65536)
		nTxLock = 65536;

	printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
	       nTxBlock, nTxLock);

	/* water marks for the tlock pool */
	TxLockLWM = (nTxLock * 4) / 10;
	TxLockHWM = (nTxLock * 7) / 10;
	TxLockVHWM = (nTxLock * 8) / 10;

	/*
	 * initialize transaction block (tblock) table
	 *
	 * transaction id (tid) = tblock index; tid = 0 is reserved.
	 */
	size = sizeof(struct tblock) * nTxBlock;
	TxBlock = vmalloc(size);
	if (TxBlock == NULL)
		return -ENOMEM;

	for (k = 1; k < nTxBlock - 1; k++) {
		TxBlock[k].next = k + 1;
		init_waitqueue_head(&TxBlock[k].gcwait);
		init_waitqueue_head(&TxBlock[k].waitor);
	}
	TxBlock[k].next = 0;
	init_waitqueue_head(&TxBlock[k].gcwait);
	init_waitqueue_head(&TxBlock[k].waitor);

	TxAnchor.freetid = 1;
	init_waitqueue_head(&TxAnchor.freewait);

	stattx.maxtid = 1;	/* statistics */

	/*
	 * initialize transaction lock (tlock) table
	 *
	 * transaction lock id (lid) = tlock index; lid = 0 is reserved.
	 */
	size = sizeof(struct tlock) * nTxLock;
	TxLock = vmalloc(size);
	if (TxLock == NULL) {
		vfree(TxBlock);
		return -ENOMEM;
	}

	/* thread the tlock free list */
	for (k = 1; k < nTxLock - 1; k++)
		TxLock[k].next = k + 1;
	TxLock[k].next = 0;
	init_waitqueue_head(&TxAnchor.freelockwait);
	init_waitqueue_head(&TxAnchor.lowlockwait);

	TxAnchor.freelock = 1;
	TxAnchor.tlocksInUse = 0;
	INIT_LIST_HEAD(&TxAnchor.anon_list);
	INIT_LIST_HEAD(&TxAnchor.anon_list2);

	LAZY_LOCK_INIT();
	INIT_LIST_HEAD(&TxAnchor.unlock_queue);

	stattx.maxlid = 1;	/* statistics */

	return 0;
}
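
/*
 *	NAME:	txExit()
 *
 * FUNCTION:	release the tblock and tlock tables at module unload time.
 */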
void txExit(void)
{
	vfree(TxLock);
	TxLock = NULL;
	vfree(TxBlock);
	TxBlock = NULL;
}
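
/*
 *	NAME:	txBegin()
 *
 * FUNCTION:	start a transaction.
 *
 * PARAMETER:	sb	- superblock
 *		flag	- 0 for an ordinary transaction; COMMIT_FORCE
 *			  bypasses the log sync barrier and may use the
 *			  reserved last tid, to avoid deadlock on the
 *			  barrier itself
 *
 * RETURN:	tid	- transaction id
 */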
361tid_t txBegin(struct super_block *sb, int flag)
362{
363 tid_t t;
364 struct tblock *tblk;
365 struct jfs_log *log;
366
367 jfs_info("txBegin: flag = 0x%x", flag);
368 log = JFS_SBI(sb)->log;
369
370 TXN_LOCK();
371
372 INCREMENT(TxStat.txBegin);
373
374 retry:
375 if (!(flag & COMMIT_FORCE)) {
376
377
378
379 if (test_bit(log_SYNCBARRIER, &log->flag) ||
380 test_bit(log_QUIESCE, &log->flag)) {
381 INCREMENT(TxStat.txBegin_barrier);
382 TXN_SLEEP(&log->syncwait);
383 goto retry;
384 }
385 }
386 if (flag == 0) {
387
388
389
390
391
392 if (TxAnchor.tlocksInUse > TxLockVHWM) {
393 INCREMENT(TxStat.txBegin_lockslow);
394 TXN_SLEEP(&TxAnchor.lowlockwait);
395 goto retry;
396 }
397 }
398
399
400
401
402 if ((t = TxAnchor.freetid) == 0) {
403 jfs_info("txBegin: waiting for free tid");
404 INCREMENT(TxStat.txBegin_freetid);
405 TXN_SLEEP(&TxAnchor.freewait);
406 goto retry;
407 }
408
409 tblk = tid_to_tblock(t);
410
411 if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
412
413 jfs_info("txBegin: waiting for free tid");
414 INCREMENT(TxStat.txBegin_freetid);
415 TXN_SLEEP(&TxAnchor.freewait);
416 goto retry;
417 }
418
419 TxAnchor.freetid = tblk->next;
420
421
422
423
424
425
426
427
428
429
430
431 tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
432
433 tblk->sb = sb;
434 ++log->logtid;
435 tblk->logtid = log->logtid;
436
437 ++log->active;
438
439 HIGHWATERMARK(stattx.maxtid, t);
440 INCREMENT(stattx.ntid);
441
442 TXN_UNLOCK();
443
444 jfs_info("txBegin: returning tid = %d", t);
445
446 return t;
447}
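
/*
 *	NAME:	txBeginAnon()
 *
 * FUNCTION:	start an anonymous transaction (no tid is assigned).
 *	Blocks while the log is at a sync barrier or quiescing, or while
 *	tlocks are critically low, but allocates nothing itself.
 */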
460void txBeginAnon(struct super_block *sb)
461{
462 struct jfs_log *log;
463
464 log = JFS_SBI(sb)->log;
465
466 TXN_LOCK();
467 INCREMENT(TxStat.txBeginAnon);
468
469 retry:
470
471
472
473 if (test_bit(log_SYNCBARRIER, &log->flag) ||
474 test_bit(log_QUIESCE, &log->flag)) {
475 INCREMENT(TxStat.txBeginAnon_barrier);
476 TXN_SLEEP(&log->syncwait);
477 goto retry;
478 }
479
480
481
482
483 if (TxAnchor.tlocksInUse > TxLockVHWM) {
484 INCREMENT(TxStat.txBeginAnon_lockslow);
485 TXN_SLEEP(&TxAnchor.lowlockwait);
486 goto retry;
487 }
488 TXN_UNLOCK();
489}
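
/*
 *	NAME:	txEnd()
 *
 * FUNCTION:	mark the end of a transaction: wake waiters, return the
 *	tid to the free list and, when the last active transaction ends,
 *	lift a pending log sync barrier.  A lazy-commit transaction is
 *	only flagged unlocked here; jfs_lazycommit() re-enters txEnd()
 *	once the group commit has completed.
 */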
500void txEnd(tid_t tid)
501{
502 struct tblock *tblk = tid_to_tblock(tid);
503 struct jfs_log *log;
504
505 jfs_info("txEnd: tid = %d", tid);
506 TXN_LOCK();
507
508
509
510
511
512 TXN_WAKEUP(&tblk->waitor);
513
514 log = JFS_SBI(tblk->sb)->log;
515
516
517
518
519
520
521
522
523
524 if (tblk->flag & tblkGC_LAZY) {
525 jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
526 TXN_UNLOCK();
527
528 spin_lock_irq(&log->gclock);
529 tblk->flag |= tblkGC_UNLOCKED;
530 spin_unlock_irq(&log->gclock);
531 return;
532 }
533
534 jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
535
536 assert(tblk->next == 0);
537
538
539
540
541 tblk->next = TxAnchor.freetid;
542 TxAnchor.freetid = tid;
543
544
545
546
547 if (--log->active == 0) {
548 clear_bit(log_FLUSH, &log->flag);
549
550
551
552
553 if (test_bit(log_SYNCBARRIER, &log->flag)) {
554 TXN_UNLOCK();
555
556
557 jfs_syncpt(log, 1);
558
559 jfs_info("log barrier off: 0x%x", log->lsn);
560
561
562 clear_bit(log_SYNCBARRIER, &log->flag);
563
564
565 TXN_WAKEUP(&log->syncwait);
566
567 goto wakeup;
568 }
569 }
570
571 TXN_UNLOCK();
572wakeup:
573
574
575
576 TXN_WAKEUP(&TxAnchor.freewait);
577}
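
/*
 *	NAME:	txLock()
 *
 * FUNCTION:	acquire a transaction lock on the specified <mp> for
 *	transaction <tid>.
 *
 * RETURN:	pointer to the tlock on success; NULL if the page is
 *	locked by another transaction, in which case the metapage has
 *	been released and the caller must redo the operation.
 */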
590struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
591 int type)
592{
593 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
594 int dir_xtree = 0;
595 lid_t lid;
596 tid_t xtid;
597 struct tlock *tlck;
598 struct xtlock *xtlck;
599 struct linelock *linelock;
600 xtpage_t *p;
601 struct tblock *tblk;
602
603 TXN_LOCK();
604
605 if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
606 !(mp->xflag & COMMIT_PAGE)) {
607
608
609
610
611 dir_xtree = 1;
612 lid = jfs_ip->xtlid;
613 } else
614 lid = mp->lid;
615
616
617 if (lid == 0)
618 goto allocateLock;
619
620 jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
621
622
623 tlck = lid_to_tlock(lid);
624 if ((xtid = tlck->tid) == tid) {
625 TXN_UNLOCK();
626 goto grantLock;
627 }
628
629
630
631
632
633
634
635
636
637
638
639
640 if (xtid == 0) {
641 tlck->tid = tid;
642 TXN_UNLOCK();
643 tblk = tid_to_tblock(tid);
644
645
646
647
648
649
650
651
652
653
654 if (jfs_ip->atlhead == lid) {
655 if (jfs_ip->atltail == lid) {
656
657
658
659 TXN_LOCK();
660 list_del_init(&jfs_ip->anon_inode_list);
661 TXN_UNLOCK();
662 }
663 jfs_ip->atlhead = tlck->next;
664 } else {
665 lid_t last;
666 for (last = jfs_ip->atlhead;
667 lid_to_tlock(last)->next != lid;
668 last = lid_to_tlock(last)->next) {
669 assert(last);
670 }
671 lid_to_tlock(last)->next = tlck->next;
672 if (jfs_ip->atltail == lid)
673 jfs_ip->atltail = last;
674 }
675
676
677
678 if (tblk->next)
679 lid_to_tlock(tblk->last)->next = lid;
680 else
681 tblk->next = lid;
682 tlck->next = 0;
683 tblk->last = lid;
684
685 goto grantLock;
686 }
687
688 goto waitLock;
689
690
691
692
693 allocateLock:
694 lid = txLockAlloc();
695 tlck = lid_to_tlock(lid);
696
697
698
699
700 tlck->tid = tid;
701
702 TXN_UNLOCK();
703
704
705 if (mp->xflag & COMMIT_PAGE) {
706
707 tlck->flag = tlckPAGELOCK;
708
709
710 metapage_nohomeok(mp);
711
712 jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
713 mp, mp->nohomeok, tid, tlck);
714
715
716
717
718
719
720 if ((tid == 0) && mp->lsn)
721 set_cflag(COMMIT_Synclist, ip);
722 }
723
724 else
725 tlck->flag = tlckINODELOCK;
726
727 if (S_ISDIR(ip->i_mode))
728 tlck->flag |= tlckDIRECTORY;
729
730 tlck->type = 0;
731
732
733 tlck->ip = ip;
734 tlck->mp = mp;
735 if (dir_xtree)
736 jfs_ip->xtlid = lid;
737 else
738 mp->lid = lid;
739
740
741
742
743
744 if (tid) {
745 tblk = tid_to_tblock(tid);
746 if (tblk->next)
747 lid_to_tlock(tblk->last)->next = lid;
748 else
749 tblk->next = lid;
750 tlck->next = 0;
751 tblk->last = lid;
752 }
753
754
755
756 else {
757 tlck->next = jfs_ip->atlhead;
758 jfs_ip->atlhead = lid;
759 if (tlck->next == 0) {
760
761 jfs_ip->atltail = lid;
762 TXN_LOCK();
763 list_add_tail(&jfs_ip->anon_inode_list,
764 &TxAnchor.anon_list);
765 TXN_UNLOCK();
766 }
767 }
768
769
770 linelock = (struct linelock *) & tlck->lock;
771 linelock->next = 0;
772 linelock->flag = tlckLINELOCK;
773 linelock->maxcnt = TLOCKSHORT;
774 linelock->index = 0;
775
776 switch (type & tlckTYPE) {
777 case tlckDTREE:
778 linelock->l2linesize = L2DTSLOTSIZE;
779 break;
780
781 case tlckXTREE:
782 linelock->l2linesize = L2XTSLOTSIZE;
783
784 xtlck = (struct xtlock *) linelock;
785 xtlck->header.offset = 0;
786 xtlck->header.length = 2;
787
788 if (type & tlckNEW) {
789 xtlck->lwm.offset = XTENTRYSTART;
790 } else {
791 if (mp->xflag & COMMIT_PAGE)
792 p = (xtpage_t *) mp->data;
793 else
794 p = &jfs_ip->i_xtroot;
795 xtlck->lwm.offset =
796 le16_to_cpu(p->header.nextindex);
797 }
798 xtlck->lwm.length = 0;
799 xtlck->twm.offset = 0;
800 xtlck->hwm.offset = 0;
801
802 xtlck->index = 2;
803 break;
804
805 case tlckINODE:
806 linelock->l2linesize = L2INODESLOTSIZE;
807 break;
808
809 case tlckDATA:
810 linelock->l2linesize = L2DATASLOTSIZE;
811 break;
812
813 default:
814 jfs_err("UFO tlock:0x%p", tlck);
815 }
816
817
818
819
820 grantLock:
821 tlck->type |= type;
822
823 return tlck;
824
825
826
827
828 waitLock:
829
830
831 if (jfs_ip->fileset != AGGREGATE_I) {
832 printk(KERN_ERR "txLock: trying to lock locked page!");
833 print_hex_dump(KERN_ERR, "ip: ", DUMP_PREFIX_ADDRESS, 16, 4,
834 ip, sizeof(*ip), 0);
835 print_hex_dump(KERN_ERR, "mp: ", DUMP_PREFIX_ADDRESS, 16, 4,
836 mp, sizeof(*mp), 0);
837 print_hex_dump(KERN_ERR, "Locker's tblock: ",
838 DUMP_PREFIX_ADDRESS, 16, 4, tid_to_tblock(tid),
839 sizeof(struct tblock), 0);
840 print_hex_dump(KERN_ERR, "Tlock: ", DUMP_PREFIX_ADDRESS, 16, 4,
841 tlck, sizeof(*tlck), 0);
842 BUG();
843 }
844 INCREMENT(stattx.waitlock);
845 TXN_UNLOCK();
846 release_metapage(mp);
847 TXN_LOCK();
848 xtid = tlck->tid;
849
850 jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
851 tid, xtid, lid);
852
853
854 if (xtid && (tlck->mp == mp) && (mp->lid == lid))
855 TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
856 else
857 TXN_UNLOCK();
858 jfs_info("txLock: awakened tid = %d, lid = %d", tid, lid);
859
860 return NULL;
861}
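
/*
 *	NAME:	txRelease()
 *
 * FUNCTION:	dissociate the tlocks of a committed transaction from
 *	their metapages and wake up anyone waiting for the transaction to
 *	release its pages.
 */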
876static void txRelease(struct tblock * tblk)
877{
878 struct metapage *mp;
879 lid_t lid;
880 struct tlock *tlck;
881
882 TXN_LOCK();
883
884 for (lid = tblk->next; lid; lid = tlck->next) {
885 tlck = lid_to_tlock(lid);
886 if ((mp = tlck->mp) != NULL &&
887 (tlck->type & tlckBTROOT) == 0) {
888 assert(mp->xflag & COMMIT_PAGE);
889 mp->lid = 0;
890 }
891 }
892
893
894
895
896
897 TXN_WAKEUP(&tblk->waitor);
898
899 TXN_UNLOCK();
900}
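
/*
 *	NAME:	txUnlock()
 *
 * FUNCTION:	initiate update of the in-memory pages modified by the
 *	transaction: home each metapage, propagate the commit lsn, free
 *	the tlocks/linelocks and remove the tblock from the log synclist.
 */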
908static void txUnlock(struct tblock * tblk)
909{
910 struct tlock *tlck;
911 struct linelock *linelock;
912 lid_t lid, next, llid, k;
913 struct metapage *mp;
914 struct jfs_log *log;
915 int difft, diffp;
916 unsigned long flags;
917
918 jfs_info("txUnlock: tblk = 0x%p", tblk);
919 log = JFS_SBI(tblk->sb)->log;
920
921
922
923
924 for (lid = tblk->next; lid; lid = next) {
925 tlck = lid_to_tlock(lid);
926 next = tlck->next;
927
928 jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
929
930
931 if ((mp = tlck->mp) != NULL &&
932 (tlck->type & tlckBTROOT) == 0) {
933 assert(mp->xflag & COMMIT_PAGE);
934
935
936
937 hold_metapage(mp);
938
939 assert(mp->nohomeok > 0);
940 _metapage_homeok(mp);
941
942
943 LOGSYNC_LOCK(log, flags);
944 if (mp->clsn) {
945 logdiff(difft, tblk->clsn, log);
946 logdiff(diffp, mp->clsn, log);
947 if (difft > diffp)
948 mp->clsn = tblk->clsn;
949 } else
950 mp->clsn = tblk->clsn;
951 LOGSYNC_UNLOCK(log, flags);
952
953 assert(!(tlck->flag & tlckFREEPAGE));
954
955 put_metapage(mp);
956 }
957
958
959
960
961 TXN_LOCK();
962
963 llid = ((struct linelock *) & tlck->lock)->next;
964 while (llid) {
965 linelock = (struct linelock *) lid_to_tlock(llid);
966 k = linelock->next;
967 txLockFree(llid);
968 llid = k;
969 }
970 txLockFree(lid);
971
972 TXN_UNLOCK();
973 }
974 tblk->next = tblk->last = 0;
975
976
977
978
979
980
981 if (tblk->lsn) {
982 LOGSYNC_LOCK(log, flags);
983 log->count--;
984 list_del(&tblk->synclist);
985 LOGSYNC_UNLOCK(log, flags);
986 }
987}
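
/*
 *	NAME:	txMaplock()
 *
 * FUNCTION:	allocate a transaction lock that carries only block map
 *	update information for a freed page/entry; no metapage is
 *	attached to it.
 */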
995struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
996{
997 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
998 lid_t lid;
999 struct tblock *tblk;
1000 struct tlock *tlck;
1001 struct maplock *maplock;
1002
1003 TXN_LOCK();
1004
1005
1006
1007
1008 lid = txLockAlloc();
1009 tlck = lid_to_tlock(lid);
1010
1011
1012
1013
1014 tlck->tid = tid;
1015
1016
1017 tlck->flag = tlckINODELOCK;
1018 if (S_ISDIR(ip->i_mode))
1019 tlck->flag |= tlckDIRECTORY;
1020 tlck->ip = ip;
1021 tlck->mp = NULL;
1022
1023 tlck->type = type;
1024
1025
1026
1027
1028
1029 if (tid) {
1030 tblk = tid_to_tblock(tid);
1031 if (tblk->next)
1032 lid_to_tlock(tblk->last)->next = lid;
1033 else
1034 tblk->next = lid;
1035 tlck->next = 0;
1036 tblk->last = lid;
1037 }
1038
1039
1040
1041 else {
1042 tlck->next = jfs_ip->atlhead;
1043 jfs_ip->atlhead = lid;
1044 if (tlck->next == 0) {
1045
1046 jfs_ip->atltail = lid;
1047 list_add_tail(&jfs_ip->anon_inode_list,
1048 &TxAnchor.anon_list);
1049 }
1050 }
1051
1052 TXN_UNLOCK();
1053
1054
1055 maplock = (struct maplock *) & tlck->lock;
1056 maplock->next = 0;
1057 maplock->maxcnt = 0;
1058 maplock->index = 0;
1059
1060 return tlck;
1061}
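
/*
 *	NAME:	txLinelock()
 *
 * FUNCTION:	allocate an additional tlock to hold an overflow linelock
 *	(log vector list) and chain it after the given linelock.
 */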
1068struct linelock *txLinelock(struct linelock * tlock)
1069{
1070 lid_t lid;
1071 struct tlock *tlck;
1072 struct linelock *linelock;
1073
1074 TXN_LOCK();
1075
1076
1077 lid = txLockAlloc();
1078 tlck = lid_to_tlock(lid);
1079
1080 TXN_UNLOCK();
1081
1082
1083 linelock = (struct linelock *) tlck;
1084 linelock->next = 0;
1085 linelock->flag = tlckLINELOCK;
1086 linelock->maxcnt = TLOCKLONG;
1087 linelock->index = 0;
1088 if (tlck->flag & tlckDIRECTORY)
1089 linelock->flag |= tlckDIRECTORY;
1090
1091
1092 linelock->next = tlock->next;
1093 tlock->next = lid;
1094
1095 return linelock;
1096}
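
/*
 *	NAME:	txCommit()
 *
 * FUNCTION:	commit the changes made under transaction <tid> to the
 *	inodes in <iplist>: write the modified on-disk inodes, write the
 *	AFTER log records via txLog(), issue the group commit, and then
 *	update the allocation maps and release the transaction locks
 *	(either here or later in the lazy commit thread).
 *
 * PARAMETER:	tid	- transaction id
 *		nip	- number of inodes in iplist
 *		iplist	- inodes modified under this transaction
 *		flag	- commit flags (COMMIT_FORCE, COMMIT_SYNC, ...)
 *
 * RETURN:	0 on success; errno on failure (the transaction is aborted)
 */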
1135int txCommit(tid_t tid,
1136 int nip,
1137 struct inode **iplist,
1138 int flag)
1139{
1140 int rc = 0;
1141 struct commit cd;
1142 struct jfs_log *log;
1143 struct tblock *tblk;
1144 struct lrd *lrd;
1145 struct inode *ip;
1146 struct jfs_inode_info *jfs_ip;
1147 int k, n;
1148 ino_t top;
1149 struct super_block *sb;
1150
1151 jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
1152
1153 if (isReadOnly(iplist[0])) {
1154 rc = -EROFS;
1155 goto TheEnd;
1156 }
1157
1158 sb = cd.sb = iplist[0]->i_sb;
1159 cd.tid = tid;
1160
1161 if (tid == 0)
1162 tid = txBegin(sb, 0);
1163 tblk = tid_to_tblock(tid);
1164
1165
1166
1167
1168 log = JFS_SBI(sb)->log;
1169 cd.log = log;
1170
1171
1172 lrd = &cd.lrd;
1173 lrd->logtid = cpu_to_le32(tblk->logtid);
1174 lrd->backchain = 0;
1175
1176 tblk->xflag |= flag;
1177
1178 if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
1179 tblk->xflag |= COMMIT_LAZY;
1180
1181
1182
1183
1184
1185
1186
1187
1188 cd.iplist = iplist;
1189 cd.nip = nip;
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203 for (k = 0; k < cd.nip; k++) {
1204 top = (cd.iplist[k])->i_ino;
1205 for (n = k + 1; n < cd.nip; n++) {
1206 ip = cd.iplist[n];
1207 if (ip->i_ino > top) {
1208 top = ip->i_ino;
1209 cd.iplist[n] = cd.iplist[k];
1210 cd.iplist[k] = ip;
1211 }
1212 }
1213
1214 ip = cd.iplist[k];
1215 jfs_ip = JFS_IP(ip);
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245 clear_cflag(COMMIT_Dirty, ip);
1246
1247
1248 if (jfs_ip->atlhead) {
1249 lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
1250 tblk->next = jfs_ip->atlhead;
1251 if (!tblk->last)
1252 tblk->last = jfs_ip->atltail;
1253 jfs_ip->atlhead = jfs_ip->atltail = 0;
1254 TXN_LOCK();
1255 list_del_init(&jfs_ip->anon_inode_list);
1256 TXN_UNLOCK();
1257 }
1258
1259
1260
1261
1262
1263 if (((rc = diWrite(tid, ip))))
1264 goto out;
1265 }
1266
1267
1268
1269
1270
1271
1272 if ((rc = txLog(log, tblk, &cd)))
1273 goto TheEnd;
1274
1275
1276
1277
1278
1279 if (tblk->xflag & COMMIT_DELETE) {
1280 ihold(tblk->u.ip);
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298 if (tblk->u.ip->i_state & I_SYNC)
1299 tblk->xflag &= ~COMMIT_LAZY;
1300 }
1301
1302 ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
1303 ((tblk->u.ip->i_nlink == 0) &&
1304 !test_cflag(COMMIT_Nolink, tblk->u.ip)));
1305
1306
1307
1308
1309 lrd->type = cpu_to_le16(LOG_COMMIT);
1310 lrd->length = 0;
1311 lmLog(log, tblk, lrd, NULL);
1312
1313 lmGroupCommit(log, tblk);
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323 if (flag & COMMIT_FORCE)
1324 txForce(tblk);
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335 if (tblk->xflag & COMMIT_FORCE)
1336 txUpdateMap(tblk);
1337
1338
1339
1340
1341 txRelease(tblk);
1342
1343 if ((tblk->flag & tblkGC_LAZY) == 0)
1344 txUnlock(tblk);
1345
1346
1347
1348
1349
1350 for (k = 0; k < cd.nip; k++) {
1351 ip = cd.iplist[k];
1352 jfs_ip = JFS_IP(ip);
1353
1354
1355
1356
1357 jfs_ip->bxflag = 0;
1358 jfs_ip->blid = 0;
1359 }
1360
1361 out:
1362 if (rc != 0)
1363 txAbort(tid, 1);
1364
1365 TheEnd:
1366 jfs_info("txCommit: tid = %d, returning %d", tid, rc);
1367 return rc;
1368}
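
/*
 *	NAME:	txLog()
 *
 * FUNCTION:	write AFTER log records for all lines modified by the
 *	transaction, dispatching each tlock to the log routine for its
 *	object type (xtree, dtree, inode, map or data page).
 */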
1381static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
1382{
1383 int rc = 0;
1384 struct inode *ip;
1385 lid_t lid;
1386 struct tlock *tlck;
1387 struct lrd *lrd = &cd->lrd;
1388
1389
1390
1391
1392 for (lid = tblk->next; lid; lid = tlck->next) {
1393 tlck = lid_to_tlock(lid);
1394
1395 tlck->flag |= tlckLOG;
1396
1397
1398 ip = tlck->ip;
1399 lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
1400 lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
1401 lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
1402
1403
1404 switch (tlck->type & tlckTYPE) {
1405 case tlckXTREE:
1406 xtLog(log, tblk, lrd, tlck);
1407 break;
1408
1409 case tlckDTREE:
1410 dtLog(log, tblk, lrd, tlck);
1411 break;
1412
1413 case tlckINODE:
1414 diLog(log, tblk, lrd, tlck, cd);
1415 break;
1416
1417 case tlckMAP:
1418 mapLog(log, tblk, lrd, tlck);
1419 break;
1420
1421 case tlckDATA:
1422 dataLog(log, tblk, lrd, tlck);
1423 break;
1424
1425 default:
1426 jfs_err("UFO tlock:0x%p", tlck);
1427 }
1428 }
1429
1430 return rc;
1431}
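
/*
 *	NAME:	diLog()
 *
 * FUNCTION:	log an inode tlock and format the maplock to update the
 *	inode allocation map.
 */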
1438static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1439 struct tlock * tlck, struct commit * cd)
1440{
1441 int rc = 0;
1442 struct metapage *mp;
1443 pxd_t *pxd;
1444 struct pxd_lock *pxdlock;
1445
1446 mp = tlck->mp;
1447
1448
1449 lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
1450 lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
1451
1452 pxd = &lrd->log.redopage.pxd;
1453
1454
1455
1456
1457 if (tlck->type & tlckENTRY) {
1458
1459 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1460 PXDaddress(pxd, mp->index);
1461 PXDlength(pxd,
1462 mp->logical_size >> tblk->sb->s_blocksize_bits);
1463 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1464
1465
1466 tlck->flag |= tlckWRITEPAGE;
1467 } else if (tlck->type & tlckFREE) {
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485 lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
1486
1487
1488
1489
1490
1491
1492
1493 lrd->log.noredoinoext.iagnum =
1494 cpu_to_le32((u32) (size_t) cd->iplist[1]);
1495 lrd->log.noredoinoext.inoext_idx =
1496 cpu_to_le32((u32) (size_t) cd->iplist[2]);
1497
1498 pxdlock = (struct pxd_lock *) & tlck->lock;
1499 *pxd = pxdlock->pxd;
1500 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1501
1502
1503 tlck->flag |= tlckUPDATEMAP;
1504
1505
1506 tlck->flag |= tlckWRITEPAGE;
1507 } else
1508 jfs_err("diLog: UFO type tlck:0x%p", tlck);
1509#ifdef _JFS_WIP
1510
1511
1512
1513
1514
1515
1516 else {
1517 assert(tlck->type & tlckEA);
1518
1519
1520
1521
1522 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1523 pxdlock = (struct pxd_lock *) & tlck->lock;
1524 nlock = pxdlock->index;
1525 for (i = 0; i < nlock; i++, pxdlock++) {
1526 if (pxdlock->flag & mlckALLOCPXD)
1527 lrd->log.updatemap.type =
1528 cpu_to_le16(LOG_ALLOCPXD);
1529 else
1530 lrd->log.updatemap.type =
1531 cpu_to_le16(LOG_FREEPXD);
1532 lrd->log.updatemap.nxd = cpu_to_le16(1);
1533 lrd->log.updatemap.pxd = pxdlock->pxd;
1534 lrd->backchain =
1535 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1536 }
1537
1538
1539 tlck->flag |= tlckUPDATEMAP;
1540 }
1541#endif
1542
1543 return rc;
1544}
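
/*
 *	NAME:	dataLog()
 *
 * FUNCTION:	log a data-page tlock; a directory table page that has
 *	moved back in-line is discarded instead of logged.
 */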
1551static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1552 struct tlock * tlck)
1553{
1554 struct metapage *mp;
1555 pxd_t *pxd;
1556
1557 mp = tlck->mp;
1558
1559
1560 lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
1561 lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
1562
1563 pxd = &lrd->log.redopage.pxd;
1564
1565
1566 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1567
1568 if (jfs_dirtable_inline(tlck->ip)) {
1569
1570
1571
1572
1573 mp->lid = 0;
1574 grab_metapage(mp);
1575 metapage_homeok(mp);
1576 discard_metapage(mp);
1577 tlck->mp = NULL;
1578 return 0;
1579 }
1580
1581 PXDaddress(pxd, mp->index);
1582 PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
1583
1584 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1585
1586
1587 tlck->flag |= tlckWRITEPAGE;
1588
1589 return 0;
1590}
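
/*
 *	NAME:	dtLog()
 *
 * FUNCTION:	log a dtree tlock and format the maplock to update the
 *	block allocation map.
 */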
1597static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1598 struct tlock * tlck)
1599{
1600 struct metapage *mp;
1601 struct pxd_lock *pxdlock;
1602 pxd_t *pxd;
1603
1604 mp = tlck->mp;
1605
1606
1607 lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
1608 lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
1609
1610 pxd = &lrd->log.redopage.pxd;
1611
1612 if (tlck->type & tlckBTROOT)
1613 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1614
1615
1616
1617
1618
1619
1620
1621 if (tlck->type & (tlckNEW | tlckEXTEND)) {
1622
1623
1624
1625
1626 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1627 if (tlck->type & tlckEXTEND)
1628 lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
1629 else
1630 lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
1631 PXDaddress(pxd, mp->index);
1632 PXDlength(pxd,
1633 mp->logical_size >> tblk->sb->s_blocksize_bits);
1634 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1635
1636
1637
1638
1639 if (tlck->type & tlckBTROOT)
1640 return;
1641 tlck->flag |= tlckUPDATEMAP;
1642 pxdlock = (struct pxd_lock *) & tlck->lock;
1643 pxdlock->flag = mlckALLOCPXD;
1644 pxdlock->pxd = *pxd;
1645
1646 pxdlock->index = 1;
1647
1648
1649 tlck->flag |= tlckWRITEPAGE;
1650 return;
1651 }
1652
1653
1654
1655
1656
1657 if (tlck->type & (tlckENTRY | tlckRELINK)) {
1658
1659 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1660 PXDaddress(pxd, mp->index);
1661 PXDlength(pxd,
1662 mp->logical_size >> tblk->sb->s_blocksize_bits);
1663 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1664
1665
1666 tlck->flag |= tlckWRITEPAGE;
1667 return;
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677 if (tlck->type & (tlckFREE | tlckRELOCATE)) {
1678
1679
1680
1681
1682 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1683 pxdlock = (struct pxd_lock *) & tlck->lock;
1684 *pxd = pxdlock->pxd;
1685 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1686
1687
1688
1689
1690 tlck->flag |= tlckUPDATEMAP;
1691 }
1692 return;
1693}
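
/*
 *	NAME:	xtLog()
 *
 * FUNCTION:	log an xtree tlock and format the maplock to update the
 *	block allocation map.
 */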
1700static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1701 struct tlock * tlck)
1702{
1703 struct inode *ip;
1704 struct metapage *mp;
1705 xtpage_t *p;
1706 struct xtlock *xtlck;
1707 struct maplock *maplock;
1708 struct xdlistlock *xadlock;
1709 struct pxd_lock *pxdlock;
1710 pxd_t *page_pxd;
1711 int next, lwm, hwm;
1712
1713 ip = tlck->ip;
1714 mp = tlck->mp;
1715
1716
1717 lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
1718 lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
1719
1720 page_pxd = &lrd->log.redopage.pxd;
1721
1722 if (tlck->type & tlckBTROOT) {
1723 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1724 p = &JFS_IP(ip)->i_xtroot;
1725 if (S_ISDIR(ip->i_mode))
1726 lrd->log.redopage.type |=
1727 cpu_to_le16(LOG_DIR_XTREE);
1728 } else
1729 p = (xtpage_t *) mp->data;
1730 next = le16_to_cpu(p->header.nextindex);
1731
1732 xtlck = (struct xtlock *) & tlck->lock;
1733
1734 maplock = (struct maplock *) & tlck->lock;
1735 xadlock = (struct xdlistlock *) maplock;
1736
1737
1738
1739
1740
1741 if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
1742
1743
1744
1745
1746
1747
1748
1749 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1750 PXDaddress(page_pxd, mp->index);
1751 PXDlength(page_pxd,
1752 mp->logical_size >> tblk->sb->s_blocksize_bits);
1753 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1754
1755
1756
1757
1758
1759
1760 lwm = xtlck->lwm.offset;
1761 if (lwm == 0)
1762 lwm = XTPAGEMAXSLOT;
1763
1764 if (lwm == next)
1765 goto out;
1766 if (lwm > next) {
1767 jfs_err("xtLog: lwm > next\n");
1768 goto out;
1769 }
1770 tlck->flag |= tlckUPDATEMAP;
1771 xadlock->flag = mlckALLOCXADLIST;
1772 xadlock->count = next - lwm;
1773 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1774 int i;
1775 pxd_t *pxd;
1776
1777
1778
1779
1780
1781
1782
1783 xadlock->flag = mlckALLOCPXDLIST;
1784 pxd = xadlock->xdlist = &xtlck->pxdlock;
1785 for (i = 0; i < xadlock->count; i++) {
1786 PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
1787 PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
1788 p->xad[lwm + i].flag &=
1789 ~(XAD_NEW | XAD_EXTENDED);
1790 pxd++;
1791 }
1792 } else {
1793
1794
1795
1796
1797 xadlock->flag = mlckALLOCXADLIST;
1798 xadlock->xdlist = &p->xad[lwm];
1799 tblk->xflag &= ~COMMIT_LAZY;
1800 }
1801 jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
1802 "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);
1803
1804 maplock->index = 1;
1805
1806 out:
1807
1808 tlck->flag |= tlckWRITEPAGE;
1809
1810 return;
1811 }
1812
1813
1814
1815
1816
1817
1818
1819 if (tlck->type & tlckFREE) {
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836 if (tblk->xflag & COMMIT_TRUNCATE) {
1837
1838 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1839 PXDaddress(page_pxd, mp->index);
1840 PXDlength(page_pxd,
1841 mp->logical_size >> tblk->sb->
1842 s_blocksize_bits);
1843 lrd->backchain =
1844 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1845
1846 if (tlck->type & tlckBTROOT) {
1847
1848 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1849 lrd->backchain =
1850 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1851 }
1852 }
1853
1854
1855
1856
1857
1858 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1859 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
1860 xtlck = (struct xtlock *) & tlck->lock;
1861 hwm = xtlck->hwm.offset;
1862 lrd->log.updatemap.nxd =
1863 cpu_to_le16(hwm - XTENTRYSTART + 1);
1864
1865 xtlck->header.offset = XTENTRYSTART;
1866 xtlck->header.length = hwm - XTENTRYSTART + 1;
1867 xtlck->index = 1;
1868 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1869
1870
1871
1872
1873
1874 tlck->flag |= tlckUPDATEMAP;
1875 xadlock->count = hwm - XTENTRYSTART + 1;
1876 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1877 int i;
1878 pxd_t *pxd;
1879
1880
1881
1882
1883
1884
1885
1886 xadlock->flag = mlckFREEPXDLIST;
1887 pxd = xadlock->xdlist = &xtlck->pxdlock;
1888 for (i = 0; i < xadlock->count; i++) {
1889 PXDaddress(pxd,
1890 addressXAD(&p->xad[XTENTRYSTART + i]));
1891 PXDlength(pxd,
1892 lengthXAD(&p->xad[XTENTRYSTART + i]));
1893 pxd++;
1894 }
1895 } else {
1896
1897
1898
1899
1900 xadlock->flag = mlckFREEXADLIST;
1901 xadlock->xdlist = &p->xad[XTENTRYSTART];
1902 tblk->xflag &= ~COMMIT_LAZY;
1903 }
1904 jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
1905 tlck->ip, mp, xadlock->count);
1906
1907 maplock->index = 1;
1908
1909
1910 if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
1911 && !(tlck->type & tlckBTROOT))
1912 tlck->flag |= tlckFREEPAGE;
1913
1914
1915
1916
1917 return;
1918 }
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930 if (tlck->type & tlckTRUNCATE) {
1931
1932 pxd_t pxd = pxd;
1933 int twm;
1934
1935
1936
1937
1938
1939
1940
1941
1942 tblk->xflag &= ~COMMIT_LAZY;
1943 lwm = xtlck->lwm.offset;
1944 if (lwm == 0)
1945 lwm = XTPAGEMAXSLOT;
1946 hwm = xtlck->hwm.offset;
1947 twm = xtlck->twm.offset;
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1961 PXDaddress(page_pxd, mp->index);
1962 PXDlength(page_pxd,
1963 mp->logical_size >> tblk->sb->s_blocksize_bits);
1964 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1965
1966
1967
1968
1969 if (twm == next - 1) {
1970
1971
1972
1973
1974
1975 pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
1976
1977 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1978 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
1979 lrd->log.updatemap.nxd = cpu_to_le16(1);
1980 lrd->log.updatemap.pxd = pxdlock->pxd;
1981 pxd = pxdlock->pxd;
1982 lrd->backchain =
1983 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1984 }
1985
1986
1987
1988
1989 if (hwm >= next) {
1990
1991
1992
1993
1994 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1995 lrd->log.updatemap.type =
1996 cpu_to_le16(LOG_FREEXADLIST);
1997 xtlck = (struct xtlock *) & tlck->lock;
1998 hwm = xtlck->hwm.offset;
1999 lrd->log.updatemap.nxd =
2000 cpu_to_le16(hwm - next + 1);
2001
2002 xtlck->header.offset = next;
2003 xtlck->header.length = hwm - next + 1;
2004 xtlck->index = 1;
2005 lrd->backchain =
2006 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
2007 }
2008
2009
2010
2011
2012 maplock->index = 0;
2013
2014
2015
2016
2017 if (lwm < next) {
2018
2019
2020
2021
2022
2023 tlck->flag |= tlckUPDATEMAP;
2024 xadlock->flag = mlckALLOCXADLIST;
2025 xadlock->count = next - lwm;
2026 xadlock->xdlist = &p->xad[lwm];
2027
2028 jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
2029 "lwm:%d next:%d",
2030 tlck->ip, mp, xadlock->count, lwm, next);
2031 maplock->index++;
2032 xadlock++;
2033 }
2034
2035
2036
2037
2038 if (twm == next - 1) {
2039
2040
2041
2042
2043
2044 tlck->flag |= tlckUPDATEMAP;
2045 pxdlock = (struct pxd_lock *) xadlock;
2046 pxdlock->flag = mlckFREEPXD;
2047 pxdlock->count = 1;
2048 pxdlock->pxd = pxd;
2049
2050 jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
2051 "hwm:%d", ip, mp, pxdlock->count, hwm);
2052 maplock->index++;
2053 xadlock++;
2054 }
2055
2056
2057
2058
2059 if (hwm >= next) {
2060
2061
2062
2063
2064 tlck->flag |= tlckUPDATEMAP;
2065 xadlock->flag = mlckFREEXADLIST;
2066 xadlock->count = hwm - next + 1;
2067 xadlock->xdlist = &p->xad[next];
2068
2069 jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
2070 "next:%d hwm:%d",
2071 tlck->ip, mp, xadlock->count, next, hwm);
2072 maplock->index++;
2073 }
2074
2075
2076 tlck->flag |= tlckWRITEPAGE;
2077 }
2078 return;
2079}
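
/*
 *	NAME:	mapLog()
 *
 * FUNCTION:	log a tlock whose only effect is a block map update
 *	(page relocation or extent allocation/free) and format the
 *	maplock accordingly.
 */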
2086static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2087 struct tlock * tlck)
2088{
2089 struct pxd_lock *pxdlock;
2090 int i, nlock;
2091 pxd_t *pxd;
2092
2093
2094
2095
2096
2097
2098
2099
2100 if (tlck->type & tlckRELOCATE) {
2101
2102
2103
2104 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
2105 pxdlock = (struct pxd_lock *) & tlck->lock;
2106 pxd = &lrd->log.redopage.pxd;
2107 *pxd = pxdlock->pxd;
2108 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2121 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
2122 lrd->log.updatemap.nxd = cpu_to_le16(1);
2123 lrd->log.updatemap.pxd = pxdlock->pxd;
2124 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2125
2126
2127
2128
2129 tlck->flag |= tlckUPDATEMAP;
2130 return;
2131 }
2132
2133
2134
2135
2136
2137 else {
2138
2139
2140
2141
2142
2143 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2144 pxdlock = (struct pxd_lock *) & tlck->lock;
2145 nlock = pxdlock->index;
2146 for (i = 0; i < nlock; i++, pxdlock++) {
2147 if (pxdlock->flag & mlckALLOCPXD)
2148 lrd->log.updatemap.type =
2149 cpu_to_le16(LOG_ALLOCPXD);
2150 else
2151 lrd->log.updatemap.type =
2152 cpu_to_le16(LOG_FREEPXD);
2153 lrd->log.updatemap.nxd = cpu_to_le16(1);
2154 lrd->log.updatemap.pxd = pxdlock->pxd;
2155 lrd->backchain =
2156 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2157 jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
2158 (ulong) addressPXD(&pxdlock->pxd),
2159 lengthPXD(&pxdlock->pxd));
2160 }
2161
2162
2163 tlck->flag |= tlckUPDATEMAP;
2164 }
2165}
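
/*
 *	NAME:	txEA()
 *
 * FUNCTION:	acquire maplocks to allocate the new EA extent and/or
 *	free the old EA extent of an inode at commit time.
 */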
2173void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
2174{
2175 struct tlock *tlck = NULL;
2176 struct pxd_lock *maplock = NULL, *pxdlock = NULL;
2177
2178
2179
2180
2181 if (newea) {
2182
2183
2184
2185
2186 if (newea->flag & DXD_EXTENT) {
2187 tlck = txMaplock(tid, ip, tlckMAP);
2188 maplock = (struct pxd_lock *) & tlck->lock;
2189 pxdlock = (struct pxd_lock *) maplock;
2190 pxdlock->flag = mlckALLOCPXD;
2191 PXDaddress(&pxdlock->pxd, addressDXD(newea));
2192 PXDlength(&pxdlock->pxd, lengthDXD(newea));
2193 pxdlock++;
2194 maplock->index = 1;
2195 } else if (newea->flag & DXD_INLINE) {
2196 tlck = NULL;
2197
2198 set_cflag(COMMIT_Inlineea, ip);
2199 }
2200 }
2201
2202
2203
2204
2205 if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
2206 if (tlck == NULL) {
2207 tlck = txMaplock(tid, ip, tlckMAP);
2208 maplock = (struct pxd_lock *) & tlck->lock;
2209 pxdlock = (struct pxd_lock *) maplock;
2210 maplock->index = 0;
2211 }
2212 pxdlock->flag = mlckFREEPXD;
2213 PXDaddress(&pxdlock->pxd, addressDXD(oldea));
2214 PXDlength(&pxdlock->pxd, lengthDXD(oldea));
2215 maplock->index++;
2216 }
2217}
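
/*
 *	NAME:	txForce()
 *
 * FUNCTION:	synchronously force the pages modified by a COMMIT_FORCE
 *	transaction to disk, after the log records have been written but
 *	before the allocation maps are updated (write-ahead logging).
 */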
2225static void txForce(struct tblock * tblk)
2226{
2227 struct tlock *tlck;
2228 lid_t lid, next;
2229 struct metapage *mp;
2230
2231
2232
2233
2234
2235
2236 tlck = lid_to_tlock(tblk->next);
2237 lid = tlck->next;
2238 tlck->next = 0;
2239 while (lid) {
2240 tlck = lid_to_tlock(lid);
2241 next = tlck->next;
2242 tlck->next = tblk->next;
2243 tblk->next = lid;
2244 lid = next;
2245 }
2246
2247
2248
2249
2250
2251 for (lid = tblk->next; lid; lid = next) {
2252 tlck = lid_to_tlock(lid);
2253 next = tlck->next;
2254
2255 if ((mp = tlck->mp) != NULL &&
2256 (tlck->type & tlckBTROOT) == 0) {
2257 assert(mp->xflag & COMMIT_PAGE);
2258
2259 if (tlck->flag & tlckWRITEPAGE) {
2260 tlck->flag &= ~tlckWRITEPAGE;
2261
2262
2263 force_metapage(mp);
2264#if 0
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276 assert(mp->nohomeok);
2277 set_bit(META_dirty, &mp->flag);
2278 set_bit(META_sync, &mp->flag);
2279#endif
2280 }
2281 }
2282 }
2283}
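
/*
 *	NAME:	txUpdateMap()
 *
 * FUNCTION:	update the persistent (and, where appropriate, working)
 *	block allocation map from the maplocks of a committed
 *	transaction, and update the inode allocation map for create and
 *	delete transactions.
 */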
2293static void txUpdateMap(struct tblock * tblk)
2294{
2295 struct inode *ip;
2296 struct inode *ipimap;
2297 lid_t lid;
2298 struct tlock *tlck;
2299 struct maplock *maplock;
2300 struct pxd_lock pxdlock;
2301 int maptype;
2302 int k, nlock;
2303 struct metapage *mp = NULL;
2304
2305 ipimap = JFS_SBI(tblk->sb)->ipimap;
2306
2307 maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322 for (lid = tblk->next; lid; lid = tlck->next) {
2323 tlck = lid_to_tlock(lid);
2324
2325 if ((tlck->flag & tlckUPDATEMAP) == 0)
2326 continue;
2327
2328 if (tlck->flag & tlckFREEPAGE) {
2329
2330
2331
2332
2333
2334
2335
2336 mp = tlck->mp;
2337 ASSERT(mp->xflag & COMMIT_PAGE);
2338 grab_metapage(mp);
2339 }
2340
2341
2342
2343
2344
2345
2346 maplock = (struct maplock *) & tlck->lock;
2347 nlock = maplock->index;
2348
2349 for (k = 0; k < nlock; k++, maplock++) {
2350
2351
2352
2353
2354
2355 if (maplock->flag & mlckALLOC) {
2356 txAllocPMap(ipimap, maplock, tblk);
2357 }
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372 else {
2373
2374 if (tlck->flag & tlckDIRECTORY)
2375 txFreeMap(ipimap, maplock,
2376 tblk, COMMIT_PWMAP);
2377 else
2378 txFreeMap(ipimap, maplock,
2379 tblk, maptype);
2380 }
2381 }
2382 if (tlck->flag & tlckFREEPAGE) {
2383 if (!(tblk->flag & tblkGC_LAZY)) {
2384
2385 ASSERT(mp->lid == lid);
2386 tlck->mp->lid = 0;
2387 }
2388 assert(mp->nohomeok == 1);
2389 metapage_homeok(mp);
2390 discard_metapage(mp);
2391 tlck->mp = NULL;
2392 }
2393 }
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 if (tblk->xflag & COMMIT_CREATE) {
2404 diUpdatePMap(ipimap, tblk->ino, false, tblk);
2405
2406
2407
2408 pxdlock.flag = mlckALLOCPXD;
2409 pxdlock.pxd = tblk->u.ixpxd;
2410 pxdlock.index = 1;
2411 txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
2412 } else if (tblk->xflag & COMMIT_DELETE) {
2413 ip = tblk->u.ip;
2414 diUpdatePMap(ipimap, ip->i_ino, true, tblk);
2415 iput(ip);
2416 }
2417}
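
/*
 *	NAME:	txAllocPMap()
 *
 * FUNCTION:	mark the extents described by the maplock as allocated in
 *	the persistent block allocation map; the working map was already
 *	updated when the blocks were allocated.
 */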
2439static void txAllocPMap(struct inode *ip, struct maplock * maplock,
2440 struct tblock * tblk)
2441{
2442 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2443 struct xdlistlock *xadlistlock;
2444 xad_t *xad;
2445 s64 xaddr;
2446 int xlen;
2447 struct pxd_lock *pxdlock;
2448 struct xdlistlock *pxdlistlock;
2449 pxd_t *pxd;
2450 int n;
2451
2452
2453
2454
2455 if (maplock->flag & mlckALLOCXADLIST) {
2456 xadlistlock = (struct xdlistlock *) maplock;
2457 xad = xadlistlock->xdlist;
2458 for (n = 0; n < xadlistlock->count; n++, xad++) {
2459 if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
2460 xaddr = addressXAD(xad);
2461 xlen = lengthXAD(xad);
2462 dbUpdatePMap(ipbmap, false, xaddr,
2463 (s64) xlen, tblk);
2464 xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
2465 jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2466 (ulong) xaddr, xlen);
2467 }
2468 }
2469 } else if (maplock->flag & mlckALLOCPXD) {
2470 pxdlock = (struct pxd_lock *) maplock;
2471 xaddr = addressPXD(&pxdlock->pxd);
2472 xlen = lengthPXD(&pxdlock->pxd);
2473 dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
2474 jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
2475 } else {
2476
2477 pxdlistlock = (struct xdlistlock *) maplock;
2478 pxd = pxdlistlock->xdlist;
2479 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2480 xaddr = addressPXD(pxd);
2481 xlen = lengthPXD(pxd);
2482 dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen,
2483 tblk);
2484 jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2485 (ulong) xaddr, xlen);
2486 }
2487 }
2488}
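
/*
 *	NAME:	txFreeMap()
 *
 * FUNCTION:	free the extents described by the maplock in the
 *	persistent and/or working block allocation map, as selected by
 *	<maptype>.
 */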
2497void txFreeMap(struct inode *ip,
2498 struct maplock * maplock, struct tblock * tblk, int maptype)
2499{
2500 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2501 struct xdlistlock *xadlistlock;
2502 xad_t *xad;
2503 s64 xaddr;
2504 int xlen;
2505 struct pxd_lock *pxdlock;
2506 struct xdlistlock *pxdlistlock;
2507 pxd_t *pxd;
2508 int n;
2509
2510 jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2511 tblk, maplock, maptype);
2512
2513
2514
2515
2516 if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
2517 if (maplock->flag & mlckFREEXADLIST) {
2518 xadlistlock = (struct xdlistlock *) maplock;
2519 xad = xadlistlock->xdlist;
2520 for (n = 0; n < xadlistlock->count; n++, xad++) {
2521 if (!(xad->flag & XAD_NEW)) {
2522 xaddr = addressXAD(xad);
2523 xlen = lengthXAD(xad);
2524 dbUpdatePMap(ipbmap, true, xaddr,
2525 (s64) xlen, tblk);
2526 jfs_info("freePMap: xaddr:0x%lx "
2527 "xlen:%d",
2528 (ulong) xaddr, xlen);
2529 }
2530 }
2531 } else if (maplock->flag & mlckFREEPXD) {
2532 pxdlock = (struct pxd_lock *) maplock;
2533 xaddr = addressPXD(&pxdlock->pxd);
2534 xlen = lengthPXD(&pxdlock->pxd);
2535 dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen,
2536 tblk);
2537 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2538 (ulong) xaddr, xlen);
2539 } else {
2540
2541 pxdlistlock = (struct xdlistlock *) maplock;
2542 pxd = pxdlistlock->xdlist;
2543 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2544 xaddr = addressPXD(pxd);
2545 xlen = lengthPXD(pxd);
2546 dbUpdatePMap(ipbmap, true, xaddr,
2547 (s64) xlen, tblk);
2548 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2549 (ulong) xaddr, xlen);
2550 }
2551 }
2552 }
2553
2554
2555
2556
2557 if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
2558 if (maplock->flag & mlckFREEXADLIST) {
2559 xadlistlock = (struct xdlistlock *) maplock;
2560 xad = xadlistlock->xdlist;
2561 for (n = 0; n < xadlistlock->count; n++, xad++) {
2562 xaddr = addressXAD(xad);
2563 xlen = lengthXAD(xad);
2564 dbFree(ip, xaddr, (s64) xlen);
2565 xad->flag = 0;
2566 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2567 (ulong) xaddr, xlen);
2568 }
2569 } else if (maplock->flag & mlckFREEPXD) {
2570 pxdlock = (struct pxd_lock *) maplock;
2571 xaddr = addressPXD(&pxdlock->pxd);
2572 xlen = lengthPXD(&pxdlock->pxd);
2573 dbFree(ip, xaddr, (s64) xlen);
2574 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2575 (ulong) xaddr, xlen);
2576 } else {
2577
2578 pxdlistlock = (struct xdlistlock *) maplock;
2579 pxd = pxdlistlock->xdlist;
2580 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2581 xaddr = addressPXD(pxd);
2582 xlen = lengthPXD(pxd);
2583 dbFree(ip, xaddr, (s64) xlen);
2584 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2585 (ulong) xaddr, xlen);
2586 }
2587 }
2588 }
2589}
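
/*
 *	NAME:	txFreelock()
 *
 * FUNCTION:	remove tlocks flagged tlckFREELOCK from an inode's
 *	anonymous tlock list, and take the inode off the anonymous inode
 *	list once its tlock list is empty.
 */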
2596void txFreelock(struct inode *ip)
2597{
2598 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
2599 struct tlock *xtlck, *tlck;
2600 lid_t xlid = 0, lid;
2601
2602 if (!jfs_ip->atlhead)
2603 return;
2604
2605 TXN_LOCK();
2606 xtlck = (struct tlock *) &jfs_ip->atlhead;
2607
2608 while ((lid = xtlck->next) != 0) {
2609 tlck = lid_to_tlock(lid);
2610 if (tlck->flag & tlckFREELOCK) {
2611 xtlck->next = tlck->next;
2612 txLockFree(lid);
2613 } else {
2614 xtlck = tlck;
2615 xlid = lid;
2616 }
2617 }
2618
2619 if (jfs_ip->atlhead)
2620 jfs_ip->atltail = xlid;
2621 else {
2622 jfs_ip->atltail = 0;
2623
2624
2625
2626 list_del_init(&jfs_ip->anon_inode_list);
2627 }
2628 TXN_UNLOCK();
2629}
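
/*
 *	NAME:	txAbort()
 *
 * FUNCTION:	abort a transaction before commit: release its metapages
 *	and tlocks without logging.
 *
 * PARAMETER:	tid	- transaction id
 *		dirty	- nonzero if on-disk state may already be
 *			  inconsistent, in which case the filesystem is
 *			  marked in error via jfs_error()
 */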
2642void txAbort(tid_t tid, int dirty)
2643{
2644 lid_t lid, next;
2645 struct metapage *mp;
2646 struct tblock *tblk = tid_to_tblock(tid);
2647 struct tlock *tlck;
2648
2649
2650
2651
2652 for (lid = tblk->next; lid; lid = next) {
2653 tlck = lid_to_tlock(lid);
2654 next = tlck->next;
2655 mp = tlck->mp;
2656 JFS_IP(tlck->ip)->xtlid = 0;
2657
2658 if (mp) {
2659 mp->lid = 0;
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669 if (mp->xflag & COMMIT_PAGE && mp->lsn)
2670 LogSyncRelease(mp);
2671 }
2672
2673 TXN_LOCK();
2674 txLockFree(lid);
2675 TXN_UNLOCK();
2676 }
2677
2678
2679
2680 tblk->next = tblk->last = 0;
2681
2682
2683
2684
2685 if (dirty)
2686 jfs_error(tblk->sb, "\n");
2687
2688 return;
2689}
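
/*
 *	NAME:	txLazyCommit()
 *
 * FUNCTION:	finish a lazily committed transaction in the background:
 *	wait until txEnd()/group commit has made it ready, update the
 *	allocation maps, then unlock and free the transaction.
 */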
2699static void txLazyCommit(struct tblock * tblk)
2700{
2701 struct jfs_log *log;
2702
2703 while (((tblk->flag & tblkGC_READY) == 0) &&
2704 ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2705
2706
2707 jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2708 yield();
2709 }
2710
2711 jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2712
2713 txUpdateMap(tblk);
2714
2715 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2716
2717 spin_lock_irq(&log->gclock);
2718
2719 tblk->flag |= tblkGC_COMMITTED;
2720
2721 if (tblk->flag & tblkGC_READY)
2722 log->gcrtc--;
2723
2724 wake_up_all(&tblk->gcwait);
2725
2726
2727
2728
2729 if (tblk->flag & tblkGC_LAZY) {
2730 spin_unlock_irq(&log->gclock);
2731 txUnlock(tblk);
2732 tblk->flag &= ~tblkGC_LAZY;
2733 txEnd(tblk - TxBlock);
2734 } else
2735 spin_unlock_irq(&log->gclock);
2736
2737 jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
2738}
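
/*
 *	NAME:	jfs_lazycommit()
 *
 * FUNCTION:	kernel thread that walks TxAnchor.unlock_queue and
 *	completes pending lazy commits via txLazyCommit(), handling at
 *	most one transaction per filesystem at a time.
 */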
2747int jfs_lazycommit(void *arg)
2748{
2749 int WorkDone;
2750 struct tblock *tblk;
2751 unsigned long flags;
2752 struct jfs_sb_info *sbi;
2753
2754 do {
2755 LAZY_LOCK(flags);
2756 jfs_commit_thread_waking = 0;
2757 while (!list_empty(&TxAnchor.unlock_queue)) {
2758 WorkDone = 0;
2759 list_for_each_entry(tblk, &TxAnchor.unlock_queue,
2760 cqueue) {
2761
2762 sbi = JFS_SBI(tblk->sb);
2763
2764
2765
2766
2767
2768
2769 if (sbi->commit_state & IN_LAZYCOMMIT)
2770 continue;
2771
2772 sbi->commit_state |= IN_LAZYCOMMIT;
2773 WorkDone = 1;
2774
2775
2776
2777
2778 list_del(&tblk->cqueue);
2779
2780 LAZY_UNLOCK(flags);
2781 txLazyCommit(tblk);
2782 LAZY_LOCK(flags);
2783
2784 sbi->commit_state &= ~IN_LAZYCOMMIT;
2785
2786
2787
2788
2789
2790 break;
2791 }
2792
2793
2794 if (!WorkDone)
2795 break;
2796 }
2797
2798 jfs_commit_thread_waking = 0;
2799
2800 if (freezing(current)) {
2801 LAZY_UNLOCK(flags);
2802 try_to_freeze();
2803 } else {
2804 DECLARE_WAITQUEUE(wq, current);
2805
2806 add_wait_queue(&jfs_commit_thread_wait, &wq);
2807 set_current_state(TASK_INTERRUPTIBLE);
2808 LAZY_UNLOCK(flags);
2809 schedule();
2810 remove_wait_queue(&jfs_commit_thread_wait, &wq);
2811 }
2812 } while (!kthread_should_stop());
2813
2814 if (!list_empty(&TxAnchor.unlock_queue))
2815 jfs_err("jfs_lazycommit being killed w/pending transactions!");
2816 else
2817 jfs_info("jfs_lazycommit being killed\n");
2818 return 0;
2819}
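
/*
 *	NAME:	txLazyUnlock()
 *
 * FUNCTION:	queue a committed transaction on the unlock_queue and
 *	wake the lazy commit thread unless it is already working on this
 *	filesystem or has already been woken.
 */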
2821void txLazyUnlock(struct tblock * tblk)
2822{
2823 unsigned long flags;
2824
2825 LAZY_LOCK(flags);
2826
2827 list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
2828
2829
2830
2831
2832 if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
2833 !jfs_commit_thread_waking) {
2834 jfs_commit_thread_waking = 1;
2835 wake_up(&jfs_commit_thread_wait);
2836 }
2837 LAZY_UNLOCK(flags);
2838}
2839
2840static void LogSyncRelease(struct metapage * mp)
2841{
2842 struct jfs_log *log = mp->log;
2843
2844 assert(mp->nohomeok);
2845 assert(log);
2846 metapage_homeok(mp);
2847}
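
/*
 *	NAME:	txQuiesce()
 *
 * FUNCTION:	block all new transactions, push anonymous transactions
 *	to completion, then flush the journal.
 */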
2859void txQuiesce(struct super_block *sb)
2860{
2861 struct inode *ip;
2862 struct jfs_inode_info *jfs_ip;
2863 struct jfs_log *log = JFS_SBI(sb)->log;
2864 tid_t tid;
2865
2866 set_bit(log_QUIESCE, &log->flag);
2867
2868 TXN_LOCK();
2869restart:
2870 while (!list_empty(&TxAnchor.anon_list)) {
2871 jfs_ip = list_entry(TxAnchor.anon_list.next,
2872 struct jfs_inode_info,
2873 anon_inode_list);
2874 ip = &jfs_ip->vfs_inode;
2875
2876
2877
2878
2879
2880 TXN_UNLOCK();
2881 tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2882 mutex_lock(&jfs_ip->commit_mutex);
2883 txCommit(tid, 1, &ip, 0);
2884 txEnd(tid);
2885 mutex_unlock(&jfs_ip->commit_mutex);
2886
2887
2888
2889
2890 cond_resched();
2891 TXN_LOCK();
2892 }
2893
2894
2895
2896
2897
2898 if (!list_empty(&TxAnchor.anon_list2)) {
2899 list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2900 INIT_LIST_HEAD(&TxAnchor.anon_list2);
2901 goto restart;
2902 }
2903 TXN_UNLOCK();
2904
2905
2906
2907
2908 jfs_flush_journal(log, 0);
2909}
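
/*
 *	NAME:	txResume()
 *
 * FUNCTION:	allow transactions to start again following txQuiesce().
 */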
2916void txResume(struct super_block *sb)
2917{
2918 struct jfs_log *log = JFS_SBI(sb)->log;
2919
2920 clear_bit(log_QUIESCE, &log->flag);
2921 TXN_WAKEUP(&log->syncwait);
2922}
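
/*
 *	NAME:	jfs_sync()
 *
 * FUNCTION:	kernel thread, woken when tlocks run low, that commits
 *	anonymous transactions in order to free their tlocks.
 */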
2931int jfs_sync(void *arg)
2932{
2933 struct inode *ip;
2934 struct jfs_inode_info *jfs_ip;
2935 tid_t tid;
2936
2937 do {
2938
2939
2940
2941 TXN_LOCK();
2942 while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
2943 jfs_ip = list_entry(TxAnchor.anon_list.next,
2944 struct jfs_inode_info,
2945 anon_inode_list);
2946 ip = &jfs_ip->vfs_inode;
2947
2948 if (! igrab(ip)) {
2949
2950
2951
2952 list_del_init(&jfs_ip->anon_inode_list);
2953 } else if (mutex_trylock(&jfs_ip->commit_mutex)) {
2954
2955
2956
2957
2958 TXN_UNLOCK();
2959 tid = txBegin(ip->i_sb, COMMIT_INODE);
2960 txCommit(tid, 1, &ip, 0);
2961 txEnd(tid);
2962 mutex_unlock(&jfs_ip->commit_mutex);
2963
2964 iput(ip);
2965
2966
2967
2968
2969 cond_resched();
2970 TXN_LOCK();
2971 } else {
2972
2973
2974
2975
2976
2977
2978
2979 list_move(&jfs_ip->anon_inode_list,
2980 &TxAnchor.anon_list2);
2981
2982 TXN_UNLOCK();
2983 iput(ip);
2984 TXN_LOCK();
2985 }
2986 }
2987
2988 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2989
2990 if (freezing(current)) {
2991 TXN_UNLOCK();
2992 try_to_freeze();
2993 } else {
2994 set_current_state(TASK_INTERRUPTIBLE);
2995 TXN_UNLOCK();
2996 schedule();
2997 }
2998 } while (!kthread_should_stop());
2999
3000 jfs_info("jfs_sync being killed");
3001 return 0;
3002}
3003
3004#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
3005static int jfs_txanchor_proc_show(struct seq_file *m, void *v)
3006{
3007 char *freewait;
3008 char *freelockwait;
3009 char *lowlockwait;
3010
3011 freewait =
3012 waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
3013 freelockwait =
3014 waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
3015 lowlockwait =
3016 waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
3017
3018 seq_printf(m,
3019 "JFS TxAnchor\n"
3020 "============\n"
3021 "freetid = %d\n"
3022 "freewait = %s\n"
3023 "freelock = %d\n"
3024 "freelockwait = %s\n"
3025 "lowlockwait = %s\n"
3026 "tlocksInUse = %d\n"
3027 "jfs_tlocks_low = %d\n"
3028 "unlock_queue is %sempty\n",
3029 TxAnchor.freetid,
3030 freewait,
3031 TxAnchor.freelock,
3032 freelockwait,
3033 lowlockwait,
3034 TxAnchor.tlocksInUse,
3035 jfs_tlocks_low,
3036 list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
3037 return 0;
3038}
3039
3040static int jfs_txanchor_proc_open(struct inode *inode, struct file *file)
3041{
3042 return single_open(file, jfs_txanchor_proc_show, NULL);
3043}
3044
3045const struct file_operations jfs_txanchor_proc_fops = {
3046 .owner = THIS_MODULE,
3047 .open = jfs_txanchor_proc_open,
3048 .read = seq_read,
3049 .llseek = seq_lseek,
3050 .release = single_release,
3051};
3052#endif
3053
3054#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
3055static int jfs_txstats_proc_show(struct seq_file *m, void *v)
3056{
3057 seq_printf(m,
3058 "JFS TxStats\n"
3059 "===========\n"
3060 "calls to txBegin = %d\n"
3061 "txBegin blocked by sync barrier = %d\n"
3062 "txBegin blocked by tlocks low = %d\n"
3063 "txBegin blocked by no free tid = %d\n"
3064 "calls to txBeginAnon = %d\n"
3065 "txBeginAnon blocked by sync barrier = %d\n"
3066 "txBeginAnon blocked by tlocks low = %d\n"
3067 "calls to txLockAlloc = %d\n"
3068 "tLockAlloc blocked by no free lock = %d\n",
3069 TxStat.txBegin,
3070 TxStat.txBegin_barrier,
3071 TxStat.txBegin_lockslow,
3072 TxStat.txBegin_freetid,
3073 TxStat.txBeginAnon,
3074 TxStat.txBeginAnon_barrier,
3075 TxStat.txBeginAnon_lockslow,
3076 TxStat.txLockAlloc,
3077 TxStat.txLockAlloc_freelock);
3078 return 0;
3079}
3080
3081static int jfs_txstats_proc_open(struct inode *inode, struct file *file)
3082{
3083 return single_open(file, jfs_txstats_proc_show, NULL);
3084}
3085
3086const struct file_operations jfs_txstats_proc_fops = {
3087 .owner = THIS_MODULE,
3088 .open = jfs_txstats_proc_open,
3089 .read = seq_read,
3090 .llseek = seq_lseek,
3091 .release = single_release,
3092};
3093#endif
3094