1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h"
28#include "xfs_bmap_btree.h"
29#include "xfs_inode.h"
30#include "xfs_dinode.h"
31#include "xfs_error.h"
32#include "xfs_filestream.h"
33#include "xfs_vnodeops.h"
34#include "xfs_inode_item.h"
35#include "xfs_quota.h"
36#include "xfs_trace.h"
37#include "xfs_fsops.h"
38
39#include <linux/kthread.h>
40#include <linux/freezer.h>
41
42
43
44
45
46
47
48#define XFS_LOOKUP_BATCH 32
49
/*
 * Try to take a stable reference to an inode found during an RCU-protected
 * per-AG radix tree walk.  Returns 0 with an igrab'd VFS inode on success;
 * ENOENT if the inode should be skipped, EFSCORRUPTED on forced shutdown.
 * Must be called with the RCU read lock held.
 */
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * Check the inode under the i_flags_lock so the flags we test cannot
	 * change underneath us.  A zero i_ino means the inode has been freed
	 * via RCU and the radix tree slot may have been reallocated - skip it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new and reclaimable inodes - leave them for reclaim to handle */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* if igrab() fails the inode is already on its way out via reclaim */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid and referenced */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}
96
/*
 * Walk all in-core inodes of one AG in XFS_LOOKUP_BATCH-sized batches,
 * calling @execute on each inode that can be grabbed.  An EAGAIN return
 * from @execute marks the inode skipped and the whole walk is restarted
 * after a short delay; EFSCORRUPTED aborts the walk.  Returns the last
 * non-EAGAIN error seen.
 */
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the RCU read lock.  Inodes
		 * that cannot be grabbed are NULLed out of the batch so the
		 * execute loop below skips them.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup.  Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG.  It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
187
188int
189xfs_inode_ag_iterator(
190 struct xfs_mount *mp,
191 int (*execute)(struct xfs_inode *ip,
192 struct xfs_perag *pag, int flags),
193 int flags)
194{
195 struct xfs_perag *pag;
196 int error = 0;
197 int last_error = 0;
198 xfs_agnumber_t ag;
199
200 ag = 0;
201 while ((pag = xfs_perag_get(mp, ag))) {
202 ag = pag->pag_agno + 1;
203 error = xfs_inode_ag_walk(mp, pag, execute, flags);
204 xfs_perag_put(pag);
205 if (error) {
206 last_error = error;
207 if (error == EFSCORRUPTED)
208 break;
209 }
210 }
211 return XFS_ERROR(last_error);
212}
213
/*
 * Write back dirty pagecache data for a single inode.  With SYNC_TRYLOCK
 * we only trylock the iolock and silently skip the inode on contention;
 * with SYNC_WAIT the flush is synchronous and we also wait for any
 * outstanding ioends before returning.
 */
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	/* nothing to write back if the mapping has no dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	return error;
}
242
/*
 * Flush a single inode's in-core metadata to its backing buffer.  Clean
 * inodes are skipped, as are inodes whose flush lock cannot be obtained
 * when we are not allowed to block (!SYNC_WAIT).
 */
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	/*
	 * Recheck under the flush lock - the inode may have been flushed
	 * while we waited for it, in which case there is nothing to do.
	 */
	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}
271
272
273
274
/*
 * Write out the dirty pagecache data of every inode in the filesystem,
 * then force the log (synchronously if SYNC_WAIT is set).
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}
291
292
293
294
295STATIC int
296xfs_sync_attr(
297 struct xfs_mount *mp,
298 int flags)
299{
300 ASSERT((flags & ~SYNC_WAIT) == 0);
301
302 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
303}
304
/*
 * Write the in-core superblock out to disk.
 */
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	/*
	 * If the superblock buffer is pinned, push on the log first so we do
	 * not get stuck in the write waiting for someone (maybe ourselves)
	 * to flush the log.  xfs_getsb() locks the buffer, so a pin check
	 * here is stable for the duration of the write.
	 */
	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	return xfs_bwrite(mp, bp);
}
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
/*
 * First stage of freeze/read-only remount - push dirty data out in two
 * passes (an optimistic non-blocking sweep followed by a blocking one),
 * then write the superblock, flush the delwri buffers and cover the log.
 * Returns the first error encountered in superblock or dummy-log writes.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush the data-only realtime device, if present */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}
373
/*
 * Reclaim and flush as much metadata as possible in preparation for a
 * quiesce.
 */
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice.  The first pass flushes most
	 * metadata but that flushing generates more metadata (typically
	 * directory updates) which must itself be flushed and logged.  We
	 * also use synchronous inode reclaim to catch anything the delwri
	 * flush skipped.  The counter only advances on passes where no
	 * buffers were pinned, so we keep iterating while writes are being
	 * held off by the log.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}
400
401
402
403
404
405
/*
 * Second stage of a quiesce.  The data is already synced, now we have to
 * take care of the metadata.  New transactions are already blocked, so we
 * need to wait for any remaining transactions to drain out before
 * proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support read-only remount
	 * without racing - a transaction may still sneak in between the
	 * drain loop above and here.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
434
435
436
437
438
439
440
441
/*
 * Queue a work item on the mount's sync work list and kick the xfssyncd
 * thread to process it.  The work item is freed by xfssyncd after the
 * syncer callback runs; @completion (may be NULL) is completed at that
 * point so callers can wait for the work to finish.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}
462
463
464
465
466
467
468
/*
 * Deferred work callback for xfs_flush_inodes(): flush filesystem data in
 * two passes (non-blocking, then blocking) and drop the inode reference
 * that xfs_flush_inodes() took before queueing us.
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);	/* reference taken by xfs_flush_inodes() */
}
479
/*
 * Push filesystem data out via the xfssyncd thread and wait for it to
 * complete, then force the log synchronously.  The igrab() reference is
 * released by xfs_flush_inodes_work(); the caller must already hold a
 * reference to @ip, so igrab() is expected to succeed here.
 */
void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}
492
493
494
495
496
497
/*
 * Periodic background sync work: cover the log (or force it), reclaim
 * inodes and sync quotas, then advance the sync sequence and wake any
 * waiters.  Errors are deliberately ignored - this is best-effort
 * background work that will be retried on the next period.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* don't write a dummy record while the fs is frozen */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, 0);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}
518
/*
 * Per-mount sync daemon.  Sleeps until either the periodic timeout expires
 * or work is queued on m_sync_list, then processes the list.  When the
 * timeout expires (or we were woken with an empty list) the built-in
 * periodic work item m_sync_work is queued so xfs_sync_worker() runs.
 * Exits when kthread_should_stop() and no work remains.
 */
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		if (list_empty(&mp->m_sync_list))
			timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_splice_init(&mp->m_sync_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			/* the periodic work item is embedded in the mount -
			 * never complete or free it */
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}
568
569int
570xfs_syncd_init(
571 struct xfs_mount *mp)
572{
573 mp->m_sync_work.w_syncer = xfs_sync_worker;
574 mp->m_sync_work.w_mount = mp;
575 mp->m_sync_work.w_completion = NULL;
576 mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
577 if (IS_ERR(mp->m_sync_task))
578 return -PTR_ERR(mp->m_sync_task);
579 return 0;
580}
581
/*
 * Stop the per-mount xfssyncd thread; blocks until the thread has exited.
 */
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}
588
/*
 * Tag an inode as reclaimable in the per-AG inode radix tree.  If this is
 * the first reclaimable inode in the AG, propagate the tag to the per-mount
 * perag tree as well so reclaim can find the AG quickly.  Caller must hold
 * pag->pag_ici_lock (see xfs_inode_set_reclaim_tag()).
 */
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}
610
611
612
613
614
615
/*
 * We set the inode flag atomically with the radix tree tag.  Once we get
 * tag lookups on the radix tree, this inode flag can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	/* lock order: pag_ici_lock outside i_flags_lock */
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
632
/*
 * Decrement the AG's reclaimable inode count and, if it drops to zero,
 * clear the AG's reclaim tag in the per-mount perag tree.  Caller must
 * hold pag->pag_ici_lock.
 */
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}
650
/*
 * Clear an inode's reclaim tag in the per-AG radix tree and update the
 * AG-level reclaim bookkeeping.  Caller must hold pag->pag_ici_lock.
 */
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}
661
662
663
664
665
/*
 * Grab the inode for reclaim exclusively.  Returns 0 with the XFS_IRECLAIM
 * flag set on success, 1 if the inode should be skipped.  Must be called
 * with the RCU read lock held.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * Do some unlocked checks first to avoid unnecessary lock traffic.
	 * The first is a flush-in-progress check, the second an
	 * already-under-reclaim check.  Only do these racy checks if we are
	 * not going to block on locks anyway (SYNC_TRYLOCK).
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
		return 1;
	}

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate, or already being reclaimed */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
/*
 * Reclaim a single inode.  Inodes in different states need different
 * treatment; roughly:
 *
 *	inode state		action
 *	-----------		------
 *	bad			reclaim immediately
 *	forced shutdown		unpin and reclaim
 *	pinned, !SYNC_WAIT	requeue (return without reclaiming)
 *	pinned, SYNC_WAIT	unpin and continue
 *	stale or clean		reclaim
 *	dirty			flush; reclaim only after a SYNC_WAIT flush
 *
 * The caller must have marked the inode with XFS_IRECLAIM (via
 * xfs_reclaim_inode_grab()); on the requeue path that flag is cleared so
 * a later pass can pick the inode up again.
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error = 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/* Now we have an inode that needs flushing */
	error = xfs_iflush(ip, sync_mode);
	if (sync_mode & SYNC_WAIT) {
		/* wait for the flush to complete, then reclaim */
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * reclaim pass to clean it up.  Warn about flush failures here so
	 * persistent problems are visible, but don't treat them as fatal -
	 * a shutdown will clean everything up anyway.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree
	 * in a short while, but that just burns CPU time waiting for IO to
	 * complete.  Instead, return 0 and let the next scheduled background
	 * reclaim attempt the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * radix_tree_delete() won't complain if the item was never inserted,
	 * so assert it really was there to catch inode lifetime problems
	 * early.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;

}
862
863
864
865
866
867
868
/*
 * Walk the AGs and reclaim the inodes in them.  Even if the filesystem is
 * corrupted we still want to try to reclaim all the inodes - otherwise a
 * reclaim walk during a shutdown/unmount would leak all the unreclaimed
 * inodes.  *nr_to_scan limits the total work; it is decremented as batches
 * are processed.
 */
int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		/*
		 * In trylock mode, skip AGs another reclaimer is already
		 * working on, and resume from the AG's saved cursor so
		 * repeated partial scans make forward progress.
		 */
		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. if we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that lead us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

		} while (nr_found && !done && *nr_to_scan > 0);

		/* remember where we stopped for the next trylock scan */
		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (trylock && skipped && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}
985
986int
987xfs_reclaim_inodes(
988 xfs_mount_t *mp,
989 int mode)
990{
991 int nr_to_scan = INT_MAX;
992
993 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
994}
995
996
997
998
/*
 * Shrinker callback for inode reclaim.  When nr_to_scan is non-zero, run a
 * non-blocking reclaim pass (refusing with -1 if the allocation context
 * cannot recurse into the filesystem, or if the scan budget was not fully
 * consumed).  Otherwise return the number of reclaimable inodes remaining.
 */
static int
xfs_reclaim_inode_shrink(
	struct shrinker	*shrink,
	int		nr_to_scan,
	gfp_t		gfp_mask)
{
	struct xfs_mount *mp;
	struct xfs_perag *pag;
	xfs_agnumber_t	ag;
	int		reclaimable;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		/* kick background reclaimer and push the AIL */
		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
		/* terminate if we don't exhaust the scan */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
1030
1031void
1032xfs_inode_shrinker_register(
1033 struct xfs_mount *mp)
1034{
1035 mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
1036 mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
1037 register_shrinker(&mp->m_inode_shrink);
1038}
1039
/*
 * Remove this mount's inode-reclaim shrinker from the VM shrinker list.
 */
void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	unregister_shrinker(&mp->m_inode_shrink);
}
1046