/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_format.h"
21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_mount.h"
25#include "xfs_inode.h"
26#include "xfs_error.h"
27#include "xfs_trans.h"
28#include "xfs_trans_priv.h"
29#include "xfs_inode_item.h"
30#include "xfs_quota.h"
31#include "xfs_trace.h"
32#include "xfs_icache.h"
33#include "xfs_bmap_util.h"
34#include "xfs_dquot_item.h"
35#include "xfs_dquot.h"
36
37#include <linux/kthread.h>
38#include <linux/freezer.h>
39
40STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
41 struct xfs_perag *pag, struct xfs_inode *ip);

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this didn't occur in transactions, we could use
	 * GFP_KERNEL here. Sadly, we *might* be in a transaction context
	 * here, and we could deadlock on memory reclaim.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	XFS_STATS_INC(vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}

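/*
 * RCU callback: perform the final free of the xfs_inode once the RCU grace
 * period following xfs_inode_free() has expired.
 */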
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

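/*
 * Tear down an inode: destroy the data/attr forks and the log item, mark the
 * inode as being reclaimed with an invalid inode number, and hand the final
 * free off to RCU.
 */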
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	XFS_STATS_DEC(vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent
	 * to wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures the reclaim race is resolved and we
	 * don't accidentally reclaim a partially set up inode.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 in xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], flags, args);
			IRELE(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
STATIC void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

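/*
 * Background worker: free post-EOF preallocated blocks across the whole
 * filesystem, then queue the next scan.
 */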
void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

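/*
 * Walk all incore inodes in all AGs and run the execute callback on each one
 * that can be grabbed. Returns the last error seen; -EFSCORRUPTED aborts the
 * walk.
 */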
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

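/*
 * As xfs_inode_ag_iterator(), but restrict the walk to inodes carrying the
 * given radix tree tag (e.g. XFS_ICI_EOFBLOCKS_TAG).
 */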
int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this code is ever considered worth it.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background inode reclaim worker: make a non-blocking (SYNC_TRYLOCK) pass
 * over the inode cache to reclaim as many inodes as possible, then queue the
 * next pass if there is still reclaimable work pending.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

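/*
 * Tag the inode for reclaim in the per-AG radix tree and, if it is the first
 * reclaimable inode in this AG, propagate the tag up into the per-mount perag
 * tree and kick off background reclaim.
 */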
static void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

STATIC void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree
	 * in a short while. However, this just burns CPU time scanning the
	 * tree waiting for IO to complete and the reclaim work never goes
	 * back to the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk leaks all the
 * unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

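/*
 * Reclaim all reclaimable inodes in the filesystem in the given mode
 * (SYNC_TRYLOCK and/or SYNC_WAIT).
 */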
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

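/*
 * An intersection-based inode filter: the inode matches only if every
 * criterion set in the eofblocks control structure matches.
 */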
STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) == eofb->eof_prid)
		return 1;

	return 0;
}

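/*
 * Attempt to free post-EOF preallocated blocks on a single inode. Called from
 * the per-AG walk; honours the filtering and locking rules set up in the
 * eofblocks control structure passed in via args.
 */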
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;
	bool need_iolock = true;
	int match;

	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or allocated */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;

		/*
		 * A scan owner implies we already hold the iolock. Skip it in
		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
		 * the possibility of EAGAIN being returned.
		 */
		if (eofb->eof_scan_owner == ip->i_ino)
			need_iolock = false;
	}

	ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);

	/* don't revisit the inode if we're not waiting */
	if (ret == -EAGAIN && !(flags & SYNC_WAIT))
		ret = 0;

	return ret;
}

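/*
 * Free post-EOF blocks on all inodes tagged with XFS_ICI_EOFBLOCKS_TAG,
 * optionally filtered and made synchronous by the eofblocks control
 * structure.
 */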
int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
					 eofb, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	/*
	 * Run a sync scan to increase effectiveness and use the union filter
	 * to cover all applicable quotas in a single scan.
	 */
	eofb.eof_scan_owner = ip->i_ino;
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);

	return scan;
}

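/*
 * Tag the inode as having post-EOF preallocated blocks. If this is the first
 * tagged inode in the AG, propagate the tag up into the per-mount perag tree
 * and kick off the background trimming worker.
 */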
void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_set_eofblocks_tag(ip);

	tagged = radix_tree_tagged(&pag->pag_ici_root,
				   XFS_ICI_EOFBLOCKS_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_EOFBLOCKS_TAG);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_queue_eofblocks(ip->i_mount);

		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
					      -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

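/*
 * Clear the eofblocks tag for the inode, and drop the perag-level tag if this
 * was the last tagged inode in the AG.
 */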
void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_clear_eofblocks_tag(ip);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_EOFBLOCKS_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
						-1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}