1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_log.h"
22#include "xfs_log_priv.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_trans_priv.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_inode.h"
31#include "xfs_dinode.h"
32#include "xfs_error.h"
33#include "xfs_filestream.h"
34#include "xfs_vnodeops.h"
35#include "xfs_inode_item.h"
36#include "xfs_quota.h"
37#include "xfs_trace.h"
38#include "xfs_fsops.h"
39#include "xfs_icache.h"
40
41#include <linux/kthread.h>
42#include <linux/freezer.h>
43
44STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
45 struct xfs_perag *pag, struct xfs_inode *ip);
46
47
48
49
/*
 * Allocate and minimally initialise an incore XFS inode for inode number
 * @ino on mount @mp.  Returns NULL if the zone allocation or the VFS-level
 * inode initialisation fails.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * Inodes from this zone are freed via call_rcu() (see
	 * xfs_inode_free_callback()), so the object returned here may be a
	 * recycled inode that concurrent RCU lookups can still observe.  The
	 * ASSERTs below verify the previous owner left it in the quiesced
	 * state xfs_inode_free() establishes (unpinned, unlocked, i_ino == 0).
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}
89
90STATIC void
91xfs_inode_free_callback(
92 struct rcu_head *head)
93{
94 struct inode *inode = container_of(head, struct inode, i_rcu);
95 struct xfs_inode *ip = XFS_I(inode);
96
97 kmem_zone_free(xfs_inode_zone, ip);
98}
99
/*
 * Tear down an incore inode's forks and log item and schedule the memory
 * for RCU-delayed freeing via xfs_inode_free_callback().
 */
STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/* the inode must no longer be on the AIL when we free it */
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state.  RCU lookups check i_ino and XFS_IRECLAIM under
	 * i_flags_lock, so taking that lock here provides the barrier
	 * against lookup races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
139
140
141
142
/*
 * Validate and take a reference on an inode found in the per-AG radix tree
 * under rcu_read_lock().  Returns 0 with the RCU lock dropped on success,
 * EAGAIN if the caller should retry the lookup, or ENOENT.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet.  Freed inodes have i_ino
	 * set to zero (see xfs_inode_free()), and a recycled inode may carry
	 * a different inode number, so a mismatch means this is not the
	 * inode we were looking for - back off and retry.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently recycling
	 * this inode (XFS_IRECLAIM set below while re-initialising), or the
	 * inode is still being set up (XFS_INEW), we cannot use it yet.
	 * Return EAGAIN so the caller retries after a short delay.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * di_mode == 0 means the on-disk inode is free (unlinked); only an
	 * allocation-time lookup (XFS_IGET_CREATE) may use such an inode.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, the VFS part of this inode has already been
	 * torn down and we have to recycle it: re-initialise the VFS state
	 * and clear the reclaim tags.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * Set IRECLAIM to prevent xfs_reclaim_inode() from stealing
		 * the inode while we drop the locks: inode_init_always() can
		 * sleep, so we must release i_flags_lock and the RCU read
		 * lock before calling it.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initialising the VFS inode failed; put the
			 * inode back on the reclaim list and let reclaim
			 * dispose of it.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode and mark it NEW
		 * so concurrent lookups wait for initialisation to finish.
		 * Both flag updates and the reclaim-tag clearing must happen
		 * atomically under pag_ici_lock + i_flags_lock.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		/* the iolock must be idle across recycle; re-initialise it */
		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
274
275
/*
 * Allocate a new inode, read it from disk and insert it into the per-AG
 * radix tree.  On success *ipp points to the new inode, locked as per
 * @lock_flags and flagged XFS_INEW.
 */
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	/* a zero di_mode means the on-disk inode is free/unlinked */
	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the write
	 * lock.  Note that GFP_NOFS is used here as we may already be in a
	 * transaction context (tp != NULL) and must not recurse into the
	 * filesystem for reclaim.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * The inode is not yet visible in the radix tree, so no other thread
	 * can hold it - the non-blocking lock must therefore succeed.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These flags must be set before the inode is inserted into the
	 * radix tree: the moment it is inserted a concurrent RCU lookup can
	 * find it, and that lookup must observe XFS_INEW (and, if requested,
	 * XFS_IDONTCACHE) to behave correctly.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode into the radix tree */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		/* only a duplicate insertion (lost race) is expected here */
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
/*
 * Look up inode number @ino in the per-AG inode cache, reading it in from
 * disk on a cache miss.  On success *ipp points to the inode with the
 * requested @lock_flags held.  EAGAIN races (reclaim, recycling, duplicate
 * insert) are retried internally after a short delay.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * The IOLOCK cannot be taken here: retries drop all locks and the
	 * iolock is re-initialised when an inode is recycled, so only ILOCK
	 * flags are supported.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's not freed while in use */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		/* cache_hit drops the RCU read lock in all cases */
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If this is a brand-new in-core inode for an allocated on-disk
	 * inode (di_mode != 0), set up the generic VFS side now.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
458
459
460
461
462
463
464
465#define XFS_LOOKUP_BATCH 32
466
/*
 * Grab a reference on an inode found during an RCU radix tree walk.
 * Returns 0 with an igrab reference held, ENOENT if the inode should be
 * skipped, or EFSCORRUPTED on a forced shutdown.
 */
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * Validity checks are done under i_flags_lock: RCU lookup can find
	 * inodes that have already been freed (i_ino == 0, see
	 * xfs_inode_free()) or are being recycled, and those must not be
	 * referenced.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on it's way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}
513
/*
 * Walk all inodes in a single AG in batches, calling @execute on each one
 * we can grab.  @tag == -1 walks all inodes; otherwise only inodes with
 * that radix tree tag are visited.  EAGAIN from @execute causes a full
 * restart of the walk after a short delay.
 */
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the RCU lock; once it is
		 * dropped the batch pointers are only valid while we hold a
		 * reference.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup.  Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG.  It was a race that
			 * lead us to see this inode, so another lookup from
			 * the same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags, args);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
617
618
619
620
621
622STATIC void
623xfs_queue_eofblocks(
624 struct xfs_mount *mp)
625{
626 rcu_read_lock();
627 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
628 queue_delayed_work(mp->m_eofblocks_workqueue,
629 &mp->m_eofblocks_work,
630 msecs_to_jiffies(xfs_eofb_secs * 1000));
631 rcu_read_unlock();
632}
633
634void
635xfs_eofblocks_worker(
636 struct work_struct *work)
637{
638 struct xfs_mount *mp = container_of(to_delayed_work(work),
639 struct xfs_mount, m_eofblocks_work);
640 xfs_icache_free_eofblocks(mp, NULL);
641 xfs_queue_eofblocks(mp);
642}
643
644int
645xfs_inode_ag_iterator(
646 struct xfs_mount *mp,
647 int (*execute)(struct xfs_inode *ip,
648 struct xfs_perag *pag, int flags,
649 void *args),
650 int flags,
651 void *args)
652{
653 struct xfs_perag *pag;
654 int error = 0;
655 int last_error = 0;
656 xfs_agnumber_t ag;
657
658 ag = 0;
659 while ((pag = xfs_perag_get(mp, ag))) {
660 ag = pag->pag_agno + 1;
661 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
662 xfs_perag_put(pag);
663 if (error) {
664 last_error = error;
665 if (error == EFSCORRUPTED)
666 break;
667 }
668 }
669 return XFS_ERROR(last_error);
670}
671
672int
673xfs_inode_ag_iterator_tag(
674 struct xfs_mount *mp,
675 int (*execute)(struct xfs_inode *ip,
676 struct xfs_perag *pag, int flags,
677 void *args),
678 int flags,
679 void *args,
680 int tag)
681{
682 struct xfs_perag *pag;
683 int error = 0;
684 int last_error = 0;
685 xfs_agnumber_t ag;
686
687 ag = 0;
688 while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
689 ag = pag->pag_agno + 1;
690 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
691 xfs_perag_put(pag);
692 if (error) {
693 last_error = error;
694 if (error == EFSCORRUPTED)
695 break;
696 }
697 }
698 return XFS_ERROR(last_error);
699}
700
701
702
703
704
705
706
707
708static void
709xfs_reclaim_work_queue(
710 struct xfs_mount *mp)
711{
712
713 rcu_read_lock();
714 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
715 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
716 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
717 }
718 rcu_read_unlock();
719}
720
721
722
723
724
725
726
727
728void
729xfs_reclaim_worker(
730 struct work_struct *work)
731{
732 struct xfs_mount *mp = container_of(to_delayed_work(work),
733 struct xfs_mount, m_reclaim_work);
734
735 xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
736 xfs_reclaim_work_queue(mp);
737}
738
/*
 * Tag an inode as reclaimable in the per-AG tree and, if this is the first
 * reclaimable inode in the AG, propagate the tag to the per-mount perag
 * tree and kick the background reclaimer.  Caller holds pag_ici_lock.
 */
static void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}
764
765
766
767
768
769
/*
 * Mark an inode as eligible for reclaim.  We set the XFS_IRECLAIMABLE flag
 * and the per-AG reclaim tag atomically under pag_ici_lock + i_flags_lock
 * so lookups observe a consistent state.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
786
/*
 * Drop the AG's reclaimable-inode count and, when it hits zero, clear the
 * reclaim tag in the per-mount perag tree.  Caller holds pag_ici_lock.
 */
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}
804
/*
 * Clear the per-AG reclaim tag for @ip and update the AG/mount reclaim
 * accounting.  Caller holds pag_ici_lock.
 */
STATIC void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}
815
816
817
818
819
/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode is already being flushed or in reclaim to avoid
	 * lock traffic.  These checks are racy, but the locked re-check
	 * below catches any races.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check the
	 * XFS_IRECLAIMABLE flag first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
/*
 * Reclaim a single inode: flush it if dirty (blocking only when SYNC_WAIT
 * is set), remove it from the per-AG radix tree, detach its dquots and
 * free the in-core structure.  Returns 0 when the inode was skipped for
 * non-blocking reasons; a flush error is returned but the inode is freed
 * regardless once we reach the reclaim label.
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		/* someone else holds the flush lock; only wait if SYNC_WAIT */
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		/* shutdown: abort any pending flush and reclaim immediately */
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * The inode is dirty: flush it to its backing buffer and write the
	 * buffer out.  xfs_iflush() returning EAGAIN means it could not get
	 * the cluster buffer without blocking, so back off, drop the ILOCK
	 * and retry from the top after a short delay.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references;
	 * we detach the dquots under the ILOCK so those lookups see a
	 * consistent inode before it is freed.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * The inode stays on the reclaim tag; returning 0 (not EAGAIN)
	 * avoids an unnecessary immediate restart of the AG walk - the
	 * inode will be revisited on the next background reclaim pass.
	 */
	return 0;
}
1021
1022
1023
1024
1025
1026
1027
/*
 * Walk the tagged AGs and reclaim up to *nr_to_scan inodes.  With
 * SYNC_TRYLOCK, AGs whose per-AG reclaim mutex is contended are skipped
 * and the per-AG cursor is used/updated so concurrent reclaimers spread
 * out; with SYNC_WAIT a final blocking pass retries skipped AGs.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			/* skip contended AGs; resume from the saved cursor */
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. if we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup.  Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG.  It was a race that lead us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		/* remember where we stopped so the next pass resumes there */
		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors. This
	 * ensure that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}
1147
1148int
1149xfs_reclaim_inodes(
1150 xfs_mount_t *mp,
1151 int mode)
1152{
1153 int nr_to_scan = INT_MAX;
1154
1155 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1156}
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167void
1168xfs_reclaim_inodes_nr(
1169 struct xfs_mount *mp,
1170 int nr_to_scan)
1171{
1172
1173 xfs_reclaim_work_queue(mp);
1174 xfs_ail_push_all(mp->m_ail);
1175
1176 xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1177}
1178
1179
1180
1181
1182
1183int
1184xfs_reclaim_inodes_count(
1185 struct xfs_mount *mp)
1186{
1187 struct xfs_perag *pag;
1188 xfs_agnumber_t ag = 0;
1189 int reclaimable = 0;
1190
1191 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1192 ag = pag->pag_agno + 1;
1193 reclaimable += pag->pag_ici_reclaimable;
1194 xfs_perag_put(pag);
1195 }
1196 return reclaimable;
1197}
1198
1199STATIC int
1200xfs_inode_match_id(
1201 struct xfs_inode *ip,
1202 struct xfs_eofblocks *eofb)
1203{
1204 if (eofb->eof_flags & XFS_EOF_FLAGS_UID &&
1205 ip->i_d.di_uid != eofb->eof_uid)
1206 return 0;
1207
1208 if (eofb->eof_flags & XFS_EOF_FLAGS_GID &&
1209 ip->i_d.di_gid != eofb->eof_gid)
1210 return 0;
1211
1212 if (eofb->eof_flags & XFS_EOF_FLAGS_PRID &&
1213 xfs_get_projid(ip) != eofb->eof_prid)
1214 return 0;
1215
1216 return 1;
1217}
1218
/*
 * Per-inode callback for the EOF-blocks scan: trim speculative post-EOF
 * preallocation from @ip if it passes the filters in @args (an optional
 * struct xfs_eofblocks).  Called via xfs_inode_ag_iterator_tag().
 */
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (!xfs_inode_match_id(ip, eofb))
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	ret = xfs_free_eofblocks(ip->i_mount, ip, true);

	/* don't revisit the inode if we're not waiting */
	if (ret == EAGAIN && !(flags & SYNC_WAIT))
		ret = 0;

	return ret;
}
1262
1263int
1264xfs_icache_free_eofblocks(
1265 struct xfs_mount *mp,
1266 struct xfs_eofblocks *eofb)
1267{
1268 int flags = SYNC_TRYLOCK;
1269
1270 if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1271 flags = SYNC_WAIT;
1272
1273 return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
1274 eofb, XFS_ICI_EOFBLOCKS_TAG);
1275}
1276
/*
 * Tag an inode in the per-AG radix tree as having speculative post-EOF
 * preallocation.  If this is the first tagged inode in the AG, propagate
 * the tag to the per-mount perag tree and arm the background scanner.
 */
void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_set_eofblocks_tag(ip);

	/* sample the AG tag state before we set ours */
	tagged = radix_tree_tagged(&pag->pag_ici_root,
				   XFS_ICI_EOFBLOCKS_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_EOFBLOCKS_TAG);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_queue_eofblocks(ip->i_mount);

		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
					      -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
1312
/*
 * Clear the EOF-blocks tag for an inode.  If no tagged inodes remain in
 * the AG, also clear the tag in the per-mount perag tree so the
 * background scanner stops visiting this AG.
 */
void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_clear_eofblocks_tag(ip);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_EOFBLOCKS_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
						-1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
1341
1342