/*
 * XFS inode cache management: in-core inode allocation and freeing,
 * inode cache lookup (xfs_iget), background inode reclaim, and the
 * EOF-blocks and COW-blocks background scanners.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

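/*
 * Allocate and initialise a new in-core XFS inode for inode number @ino.
 * Returns NULL if zone allocation or VFS inode initialisation fails.
 */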
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * The zone allocation is under KM_SLEEP so it should not fail, but
	 * keep the NULL check as a defensive measure.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}

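/*
 * RCU callback that performs the final teardown of an inode: destroy any
 * remaining forks and the inode log item, then return the inode to the zone.
 */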
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_zone_free(xfs_inode_zone, ip);
}

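/*
 * Free an inode via RCU so that concurrent lockless lookups walking the
 * inode radix tree see either a valid inode or nothing, never freed memory.
 */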
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

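/*
 * Free an inode that was never fully instantiated: mark it as being
 * reclaimed with an invalid inode number, then free it via RCU.
 */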
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against
	 * lookup races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every
 * few seconds, as well as being kicked by the inode cache shrinker when
 * memory goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

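/*
 * Account another reclaimable inode in this AG; on the 0 -> 1 transition,
 * propagate the reclaim tag into the per-mount perag radix tree and kick
 * off background reclaim.
 */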
static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

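/*
 * Wait for a freshly instantiated inode to have its XFS_INEW flag cleared
 * before operating on it.
 */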
static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int		error;
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	uint64_t	version = inode_peek_iversion(inode);
	umode_t		mode = inode->i_mode;
	dev_t		dev = inode->i_rdev;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	return error;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_d.di_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into useable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		init_rwsem(&inode->i_rwsem);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

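/*
 * Inode cache miss: the inode is not in the cache, so read it from disk,
 * allocate a new in-core inode and insert it into the per-AG radix tree.
 */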
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	if (!xfs_inode_verify_forks(ip)) {
		error = -EFSCORRUPTED;
		goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by
	 * the RCU locking mechanism) can find the inode, and that lookup must
	 * see the valid values. Hence we set the flags and the dquot pointers
	 * before the insert.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked
 * up in the cache held in each AG.  If the inode is found in the cache,
 * initialise the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to
 * the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path, hence only the XFS_ILOCK flags may be passed here.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can finish setting
	 * up the VFS inode now. If it's a new inode being created, xfs_ialloc
	 * will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, we should probably have a chat.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

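/*
 * Decide if the given inode is eligible for the AG walk and, if so, grab a
 * reference on its VFS inode. Returns 0 on success, a negative errno if the
 * inode must be skipped.
 */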
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * Check for a stale RCU freed inode: the inode number is zeroed
	 * before the RCU callback frees the structure, so a zero i_ino means
	 * this slot is being torn down and must be skipped. The i_flags_lock
	 * provides the barrier against lookup races with teardown and reuse.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

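/*
 * Walk all in-core inodes of one AG in batches of XFS_LOOKUP_BATCH and run
 * @execute on each grabbed inode; @tag restricts the walk to inodes with the
 * given radix tree tag set (-1 walks everything).
 */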
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag,
	int			iter_flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. if we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that lead
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], flags, args);
			xfs_irele(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
void
xfs_queue_cowblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work,
				   msecs_to_jiffies(xfs_cowb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_cowblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_cowblocks_work);
	xfs_icache_free_cowblocks(mp, NULL);
	xfs_queue_cowblocks(mp);
}

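/*
 * Walk every AG of the filesystem and run @execute on each in-core inode
 * that can be grabbed, without any radix tree tag filtering.
 */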
int
xfs_inode_ag_iterator_flags(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			iter_flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
					  iter_flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}

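/*
 * As xfs_inode_ag_iterator(), but only visits AGs whose perag radix tree
 * entry carries @tag.
 */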
int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
					  0);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check the
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_reclaim_inode_grab */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

reclaim:
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree
	 * in a short while. However, this just burns CPU time scanning the
	 * tree waiting for IO to complete and the reclaim work never goes back
	 * to the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes: leaving an AG
 * unreclaimed during a shutdown or unmount can leak resources.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. if we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that lead us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

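/*
 * Return 1 if the inode matches all of the uid/gid/prid filters set in
 * @eofb, 0 otherwise.
 */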
STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) == eofb->eof_prid)
		return 1;

	return 0;
}

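/*
 * Trim post-EOF preallocated blocks from an inode if it passes the filters
 * in @args (a struct xfs_eofblocks). Called from the per-AG inode walk.
 */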
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int			ret = 0;
	struct xfs_eofblocks	*eofb = args;
	int			match;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (flags & SYNC_WAIT)
			ret = -EAGAIN;
		return ret;
	}
	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

static int
__xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			tag)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, execute, flags,
					 eofb, tag);
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int			scan = 0;
	struct xfs_eofblocks	eofb = {0};
	struct xfs_dquot	*dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter
	 * to cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

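/*
 * Translate a per-AG radix tree tag into the corresponding in-core inode
 * flag bit.
 */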
static inline unsigned long
xfs_iflag_for_tag(
	int		tag)
{
	switch (tag) {
	case XFS_ICI_EOFBLOCKS_TAG:
		return XFS_IEOFBLOCKS;
	case XFS_ICI_COWBLOCKS_TAG:
		return XFS_ICOWBLOCKS;
	default:
		ASSERT(0);
		return 0;
	}
}

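/*
 * Tag an inode in the per-AG radix tree and, for the first tagged inode in
 * the AG, in the per-mount perag tree as well; @execute kicks off the
 * matching background worker on that transition.
 */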
static void
__xfs_inode_set_blocks_tag(
	xfs_inode_t	*ip,
	void		(*execute)(struct xfs_mount *mp),
	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				  int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & xfs_iflag_for_tag(tag))
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

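/*
 * Clear an inode's tag in the per-AG radix tree and, when the last tagged
 * inode in the AG goes away, in the per-mount perag tree as well.
 */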
static void
__xfs_inode_clear_blocks_tag(
	xfs_inode_t	*ip,
	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				    int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	struct xfs_eofblocks	*eofb = args;
	int			match;
	int			ret = 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/* Free the CoW blocks */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

int
xfs_icache_free_cowblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

int
xfs_inode_free_quota_cowblocks(
	struct xfs_inode	*ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
			trace_xfs_perag_set_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_icache_disable_reclaim(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_eofblocks_work);
	cancel_delayed_work_sync(&mp->m_cowblocks_work);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_icache_enable_reclaim(
	struct xfs_mount	*mp)
{
	xfs_queue_eofblocks(mp);
	xfs_queue_cowblocks(mp);
}