/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

struct workqueue_struct	*xfs_syncd_wq;	/* sync workqueue */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * Check for a stale RCU-freed inode.
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

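/*
 * Walk all incore inodes in the filesystem, one AG at a time, calling the
 * execute callback on each inode found.  The walk aborts on the first
 * EFSCORRUPTED error; otherwise the last error seen is returned.
 */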
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

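/*
 * Write back an inode's dirty pagecache data.  The iolock is taken shared;
 * with SYNC_TRYLOCK we back off rather than block on it, and SYNC_WAIT
 * selects a synchronous rather than async page flush.
 */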
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			return 0;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

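/*
 * Write back an inode's dirty in-core metadata via xfs_iflush().  Clean
 * inodes are left alone; SYNC_WAIT means we block on the flush lock rather
 * than backing off.
 */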
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

	/*
	 * We don't want to try again on non-blocking flushes that can't run
	 * again immediately. If an inode really must be written, then that's
	 * what the SYNC_WAIT flag does.
	 */
	if (error == EAGAIN) {
		ASSERT(!(flags & SYNC_WAIT));
		error = 0;
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

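/*
 * Write back and release the superblock buffer.
 */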
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;
	int			error;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the
	 * log.
	 */
	bp = xfs_getsb(mp, 0);
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}

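/*
 * Log the inode core if it carries unlogged in-core updates (i_update_core
 * is set), e.g. timestamp changes, so that a subsequent log force makes them
 * permanent without requiring inode writeback.
 */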
int
xfs_log_dirty_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (!ip->i_update_core)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/*
	 * Joining with XFS_ILOCK_EXCL transfers lock ownership to the
	 * transaction, so the commit below also unlocks the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return xfs_trans_commit(tp, 0);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. The first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes
 * to disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/*
	 * Log all pending size and timestamp updates.  The vfs writeback
	 * code nests inside the ->sync_fs callback, so we can't take the
	 * s_umount lock here, so we have to log the inodes here rather
	 * than pushing them to disk via generic writeback.
	 */
	xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);

	xfs_qm_sync(mp, SYNC_TRYLOCK);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* force out the log */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		xfs_flush_buftarg(mp->m_rtdev_targp, 1);

	return error ? error : error2;
}

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice.  The first instance of the loop
	 * will flush most meta data but that will generate more meta data
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

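/*
 * Queue the next periodic sync work to run xfs_syncd_centisecs from now.
 */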
static void
xfs_syncd_queue_sync(
	struct xfs_mount	*mp)
{
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all the inodes in the AIL, push them to
 * disk and unpin any buffers that have been unpinned within the previous
 * pushes.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs syncd work default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be useful or wrong.
 */
static void
xfs_syncd_queue_reclaim(
	struct xfs_mount	*mp)
{
	/*
	 * We can have inodes enter reclaim after we've shut down the syncd
	 * workqueue during unmount, so don't allow reclaim work to be queued
	 * during unmount.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every
 * few seconds, as well as being kicked by the inode cache shrinker when
 * memory goes low. It scans as quickly as possible avoiding locked inodes or
 * those already being flushed, and once done schedules a future pass.
 */
STATIC void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_syncd_queue_reclaim(mp);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room.
 *
 * Queue a new data flush if there isn't one already in progress and
 * wait for completion of the flush. This means that we only ever have one
 * inode flush in progress no matter how many ENOSPC events are occurring and
 * how many filesystems are mounted.
 */
void
xfs_flush_inodes(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	queue_work(xfs_syncd_wq, &mp->m_flush_work);
	flush_work_sync(&mp->m_flush_work);
}

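/*
 * Flush dirty pagecache data in two passes: a non-blocking sweep to get the
 * IO moving, then a blocking pass to wait for it all to complete.
 */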
STATIC void
xfs_flush_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(work,
					struct xfs_mount, m_flush_work);

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}

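/*
 * Set up the syncd work items and kick off the periodic sync and reclaim
 * work.
 */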
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);
	xfs_syncd_queue_reclaim(mp);

	return 0;
}

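/*
 * Cancel any pending or running syncd, reclaim and flush work.
 */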
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}

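/*
 * Tag an inode for reclaim in the per-AG inode radix tree and, for the first
 * reclaimable inode in the AG, propagate the tag into the per-AG tree and
 * kick the background reclaimer.  The caller holds pag->pag_ici_lock and the
 * inode's i_flags_lock.
 */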
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_syncd_queue_reclaim(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

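/*
 * Clear the inode's reclaim tag in the per-AG inode radix tree and drop the
 * AG's reclaimable count via __xfs_inode_clear_reclaim().
 */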
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM)))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush_int() might look like duplicates, but they're not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on
 * the flush lock, the caller should push out delayed write inodes first
 * before trying to reclaim them to minimise the amount of time spent waiting.
 * For background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;

		/*
		 * If we only have a single dirty inode in a cluster there is
		 * a fair chance that the AIL push may have pushed it into
		 * the buffer, but xfsbufd won't touch it until 30 seconds
		 * from now, and thus we will lock up here.
		 *
		 * Promote the inode buffer to the front of the delwri list
		 * and wake up xfsbufd now.
		 */
		xfs_promote_inode(ip);
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * We do a nonblocking flush here even if we are doing a SYNC_WAIT
	 * reclaim as we can deadlock with inode cluster removal.
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here. As a result,
	 * doing a blocking xfs_itobp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in
	 * the cache to mark them stale, if we hit this case we don't
	 * actually want to do IO here - we want the inode marked stale so we
	 * can simply reclaim it. Hence if we get an EAGAIN error on a
	 * SYNC_WAIT flush, just unlock the inode, back off and try again.
	 * Hopefully the next pass through will see the stale flag set on
	 * the inode.
	 */
	error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
	if (sync_mode & SYNC_WAIT) {
		if (error == EAGAIN) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			/* backoff longer than in xfs_ifree_cluster */
			delay(2);
			goto restart;
		}
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting
	 * for it now. We also don't return errors here - if the error is
	 * transient then the next reclaim pass will flush the inode, and if
	 * the error is permanent then the next sync reclaim will reclaim
	 * the inode and pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_warn(ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree
	 * in a short while. However, this just burns CPU time scanning the
	 * tree waiting for IO to complete and xfssyncd never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop
	 * the ilock but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;
}

/*
 * Reclaim the inodes in all AGs tagged for reclaim.  Errors do not abort the
 * walk - even on a corrupted or shut down filesystem we want to reclaim as
 * many inodes as possible - so the last error seen is what gets returned.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we
			 * found nothing, nr == 0 and the loop will be
			 * skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block
				 * of the AG and we are currently pointing to
				 * the last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors).
	 * This ensures that when we get more reclaimers than AGs we block
	 * rather than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}

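/*
 * Reclaim all reclaimable inodes, with no limit on the scan count.  The
 * mode flags select blocking (SYNC_WAIT) or non-blocking (SYNC_TRYLOCK)
 * behaviour.
 */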
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim
 * in progress, while we will throttle the speed of reclaim via doing
 * synchronous reclaim of inodes. That means if we come across dirty inodes,
 * we wait for them to be cleaned, which we hope will not be very long due to
 * the background walker having already kicked the IO off on those dirty
 * inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_syncd_queue_reclaim(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}