/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  Good luck.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
				struct xfs_perag *pag, struct xfs_inode *ip);

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this didn't occur in transactions, we could use KM_MAYFAIL and
	 * return NULL here on ENOMEM. For now this allocation sleeps until
	 * it succeeds.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
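
/*
 * Illustrative sketch, not code in this file: the lookup-side pattern that
 * the zeroed i_ino above supports. An RCU walker can find a just-freed inode
 * in the radix tree, so it must revalidate the inode number under
 * ip->i_flags_lock before use, exactly as xfs_iget_cache_hit() does below:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	if (ip) {
 *		spin_lock(&ip->i_flags_lock);
 *		if (ip->i_ino != ino) {
 *			(freed or reused within the RCU grace period)
 *			error = -EAGAIN;
 *		}
 *		...
 *	}
 */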

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode->i_version;
	umode_t			mode = inode->i_mode;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode->i_version = version;
	inode->i_mode = mode;
	return error;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked
 * up in the cache held in each AG.  If the inode is found in the cache, the
 * VFS inode is initialised if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to
 * the cache and initialise the VFS inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 in xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * The IO lock may not be requested here: lookup only ever takes the
	 * ILOCK, and callers that need the IO lock must take it themselves
	 * once the inode is returned.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's a valid AG */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set up the
	 * inode immediately. Otherwise we've just allocated the inode core
	 * and need to wait until it has been fully initialised.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
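
/*
 * Illustrative usage sketch, not part of this file: a typical caller looks
 * the inode up, uses it, then unlocks and releases it. IRELE() is the XFS
 * wrapper around iput(); the surrounding locals are assumed:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	(... use ip, e.g. read ip->i_d fields ...)
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */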

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. if we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], flags, args);
			IRELE(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
STATIC void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}
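
/*
 * Illustrative sketch, not part of this file: a minimal "execute" callback
 * of the shape xfs_inode_ag_iterator() expects. The walk holds an inode
 * reference (via igrab() in xfs_inode_ag_walk_grab()) across the call and
 * drops it with IRELE() afterwards; returning -EAGAIN makes the walk retry,
 * any other error is propagated. The callback name here is hypothetical:
 *
 *	STATIC int
 *	xfs_example_execute(
 *		struct xfs_inode	*ip,
 *		int			flags,
 *		void			*args)
 *	{
 *		int	*counter = args;
 *
 *		(*counter)++;
 *		return 0;
 *	}
 *
 *	error = xfs_inode_ag_iterator(mp, xfs_example_execute, 0, &counter);
 */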

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * sysctl, but short of that it's fine to derive it from the sync interval.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It is entirely
 * non-blocking (SYNC_TRYLOCK), so anything that cannot be reclaimed without
 * blocking is left for the next pass or for synchronous reclaim.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_reclaim_work_queue(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
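
/*
 * For context, a sketch of the caller (which lives in xfs_super.c, not in
 * this file): the VFS eviction path hands inodes over to reclaim through
 * xfs_inode_set_reclaim_tag(), roughly:
 *
 *	STATIC void
 *	xfs_fs_destroy_inode(
 *		struct inode		*inode)
 *	{
 *		struct xfs_inode	*ip = XFS_I(inode);
 *		...
 *		xfs_inode_set_reclaim_tag(ip);
 *	}
 */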

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

STATIC void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_reclaim_inode_grab */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree
	 * in a short while. However, this just burns CPU time scanning the
	 * tree waiting for IO to complete and the reclaim work never goes back
	 * to the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * a shut down during filesystem unmount can leave inodes behind and leak
 * memory.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. if we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
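
/*
 * For context, a sketch of how the reclaim entry points above are wired up
 * to the superblock shrinker elsewhere (xfs_super.c, not this file):
 *
 *	static long
 *	xfs_fs_nr_cached_objects(
 *		struct super_block	*sb,
 *		struct shrink_control	*sc)
 *	{
 *		return xfs_reclaim_inodes_count(XFS_M(sb));
 *	}
 *
 *	static long
 *	xfs_fs_free_cached_objects(
 *		struct super_block	*sb,
 *		struct shrink_control	*sc)
 *	{
 *		return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
 *	}
 */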

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) == eofb->eof_prid)
		return 1;

	return 0;
}
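
/*
 * Worked example of the two filters above: with
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_GID |
 *			 XFS_EOF_FLAGS_UNION;
 *
 * an inode is processed when its owner matches eof_uid *or* its group
 * matches eof_gid, while without XFS_EOF_FLAGS_UNION (xfs_inode_match_id())
 * both would have to match. xfs_inode_free_quota_eofblocks() below relies
 * on the union behaviour to cover several quotas in one scan.
 */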

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int			ret;
	struct xfs_eofblocks	*eofb = args;
	bool			need_iolock = true;
	int			match;

	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;

		/*
		 * A scan owner implies we already hold the iolock. Skip it in
		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
		 * the possibility of EAGAIN being returned.
		 */
		if (eofb->eof_scan_owner == ip->i_ino)
			need_iolock = false;
	}

	ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);

	/* don't revisit the inode if we're not waiting */
	if (ret == -EAGAIN && !(flags & SYNC_WAIT))
		ret = 0;

	return ret;
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	int			flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
					 eofb, XFS_ICI_EOFBLOCKS_TAG);
}
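
/*
 * Illustrative sketch, not part of this file: building a filter for a
 * targeted scan. For example, to synchronously trim post-EOF blocks from
 * all files owned by one user ("uid" is an assumed kuid_t local; callers
 * inside XFS also set eof_scan_owner, as xfs_inode_free_quota_eofblocks()
 * below does):
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_SYNC;
 *	eofb.eof_uid = uid;
 *	eofb.eof_scan_owner = ip->i_ino;
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 */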

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	int			scan = 0;
	struct xfs_eofblocks	eofb = {0};
	struct xfs_dquot	*dq;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	/*
	 * Run a sync scan to increase effectiveness and use the union filter
	 * to cover all applicable quotas in a single scan.
	 */
	eofb.eof_scan_owner = ip->i_ino;
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);

	return scan;
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int		tagged;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_set_eofblocks_tag(ip);

	tagged = radix_tree_tagged(&pag->pag_ici_root,
				   XFS_ICI_EOFBLOCKS_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_EOFBLOCKS_TAG);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_queue_eofblocks(ip->i_mount);

		trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
					      -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	trace_xfs_inode_clear_eofblocks_tag(ip);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_EOFBLOCKS_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_EOFBLOCKS_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
						-1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}