1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_acl.h"
22#include "xfs_bit.h"
23#include "xfs_log.h"
24#include "xfs_inum.h"
25#include "xfs_trans.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_dir2.h"
29#include "xfs_dmapi.h"
30#include "xfs_mount.h"
31#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h"
38#include "xfs_btree.h"
39#include "xfs_ialloc.h"
40#include "xfs_quota.h"
41#include "xfs_utils.h"
42#include "xfs_trans_priv.h"
43#include "xfs_inode_item.h"
44#include "xfs_bmap.h"
45#include "xfs_btree_trace.h"
46#include "xfs_dir2_trace.h"
47
48
49
50
51
/*
 * Allocate and minimally initialise an in-core XFS inode for inode
 * number @ino on mount @mp.
 *
 * Returns the new inode with its embedded VFS inode re-initialised and
 * marked I_NEW|I_LOCK, or NULL if either the zone allocation or the VFS
 * initialisation fails.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	/* Re-initialise the embedded VFS inode; undo the zone alloc on failure. */
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* The zone object must come back to us in an idle, clean state. */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/* Initialise the XFS-private part of the inode. */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	/*
	 * Per-inode trace buffers, compiled in only with the matching
	 * trace options.  NOTE(review): ktrace_alloc() results are not
	 * checked here -- presumably a NULL buffer is tolerated by the
	 * ktrace_enter() side; confirm.
	 */
#ifdef XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	/* Present the inode to the VFS as new and locked. */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}
117
/*
 * Tear down an in-core inode: free the data and attribute forks, the
 * optional trace buffers and the inode log item, then return the inode
 * to the zone.  The final ASSERTs require the inode to be completely
 * idle (no I/O in flight, not pinned, not locked, not being flushed).
 */
STATIC void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* Only regular files, directories and symlinks carry data-fork state. */
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif

	if (ip->i_itemp) {
		/*
		 * Per the ASSERT below, the log item may only still be in
		 * the AIL if the filesystem is being forcibly shut down.
		 * If so, pull it out before destroying it.  The unlocked
		 * flag test is rechecked under xa_lock before deleting.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				/* drops xa_lock itself -- note no unlock on this path */
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* The inode must be completely idle before it goes back to the zone. */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}
182
183
184
185
/*
 * Handle a hit in the per-AG inode radix tree: validate the inode we
 * found and, if it is usable, take a reference and optionally lock it.
 *
 * Called with pag->pag_ici_lock held for read; the lock is dropped on
 * every return path (and ip->i_flags_lock, taken here, likewise).
 *
 * Returns 0 on success, EAGAIN if the caller should back off and retry
 * the lookup, or ENOENT for a freed inode the caller is not allocating.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	spin_lock(&ip->i_flags_lock);

	/*
	 * An inode marked XFS_INEW is still being set up and one marked
	 * XFS_IRECLAIM is being reclaimed; neither can be handed out, so
	 * return EAGAIN and let the caller retry.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * di_mode == 0 means the on-disk inode is free; it is only of
	 * interest to a caller that is allocating it.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * An XFS_IRECLAIMABLE inode is still cached by XFS but its VFS
	 * inode has been torn down: recycle it by re-initialising the
	 * VFS inode in place instead of reading it back from disk.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * Flip the inode to XFS_INEW and clear the reclaim state
		 * *before* dropping the locks, so that concurrent lookups
		 * hit the EAGAIN check above while we re-initialise.
		 */
		ip->i_flags |= XFS_INEW;
		ip->i_flags &= ~XFS_IRECLAIMABLE;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);

		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initialisation failed: restore the inode to
			 * the reclaimable state, under the same locks we
			 * held when we took it out of that state.
			 */
			read_lock(&pag->pag_ici_lock);
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~XFS_INEW;
			ip->i_flags |= XFS_IRECLAIMABLE;
			__xfs_inode_set_reclaim_tag(pag, ip);
			goto out_error;
		}
		inode->i_state = I_LOCK|I_NEW;
	} else {
		/* igrab() fails if the VFS inode is being freed: retry. */
		if (!igrab(inode)) {
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		read_unlock(&pag->pag_ici_lock);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	return error;
}
282
283
284static int
285xfs_iget_cache_miss(
286 struct xfs_mount *mp,
287 struct xfs_perag *pag,
288 xfs_trans_t *tp,
289 xfs_ino_t ino,
290 struct xfs_inode **ipp,
291 xfs_daddr_t bno,
292 int flags,
293 int lock_flags) __releases(pag->pag_ici_lock)
294{
295 struct xfs_inode *ip;
296 int error;
297 unsigned long first_index, mask;
298 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
299
300 ip = xfs_inode_alloc(mp, ino);
301 if (!ip)
302 return ENOMEM;
303
304 error = xfs_iread(mp, tp, ip, bno, flags);
305 if (error)
306 goto out_destroy;
307
308 xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
309
310 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
311 error = ENOENT;
312 goto out_destroy;
313 }
314
315
316
317
318
319
320 if (radix_tree_preload(GFP_KERNEL)) {
321 error = EAGAIN;
322 goto out_destroy;
323 }
324
325
326
327
328
329 if (lock_flags) {
330 if (!xfs_ilock_nowait(ip, lock_flags))
331 BUG();
332 }
333
334 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
335 first_index = agino & mask;
336 write_lock(&pag->pag_ici_lock);
337
338
339 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
340 if (unlikely(error)) {
341 WARN_ON(error != -EEXIST);
342 XFS_STATS_INC(xs_ig_dup);
343 error = EAGAIN;
344 goto out_preload_end;
345 }
346
347
348 ip->i_udquot = ip->i_gdquot = NULL;
349 xfs_iflags_set(ip, XFS_INEW);
350
351 write_unlock(&pag->pag_ici_lock);
352 radix_tree_preload_end();
353 *ipp = ip;
354 return 0;
355
356out_preload_end:
357 write_unlock(&pag->pag_ici_lock);
358 radix_tree_preload_end();
359 if (lock_flags)
360 xfs_iunlock(ip, lock_flags);
361out_destroy:
362 __destroy_inode(VFS_I(ip));
363 xfs_inode_free(ip);
364 return error;
365}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
/*
 * Look up inode number @ino, first in the per-AG inode cache and, on a
 * miss, by reading it in from disk and inserting it into the cache.
 *
 * On success the inode is returned in *ipp holding the locks requested
 * in @lock_flags.  An EAGAIN from either cache path (racing setup,
 * reclaim or duplicate insert) is retried here after a short delay.
 * @bno is passed through to the disk read.  Returns a positive errno
 * on failure.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* Reject inode numbers beyond the inode-capable AG range. */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* Get the per-AG structure and ensure the AG can hold inodes. */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		/*
		 * NOTE(review): this path returns without xfs_put_perag(),
		 * unlike the paths below -- confirm xfs_get_perag() takes
		 * no reference that needs dropping here.
		 */
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		/* Both cache paths drop pag_ici_lock on all of their returns. */
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	/*
	 * Finish VFS setup for a newly cached inode with a real on-disk
	 * type.  A freshly allocated inode (di_mode == 0) is left for
	 * the allocation path to set up.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
457
458
459
460
461
462
463
464
465
/*
 * Drop the inode locks named in @lock_flags and release a reference on
 * the inode.  The unlock happens before the reference drop so the last
 * reference is never released while locks are still held.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}
474
475
476
477
478void
479xfs_iput_new(
480 xfs_inode_t *ip,
481 uint lock_flags)
482{
483 struct inode *inode = VFS_I(ip);
484
485 xfs_itrace_entry(ip);
486
487 if ((ip->i_d.di_mode == 0)) {
488 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
489 make_bad_inode(inode);
490 }
491 if (inode->i_state & I_NEW)
492 unlock_new_inode(inode);
493 if (lock_flags)
494 xfs_iunlock(ip, lock_flags);
495 IRELE(ip);
496}
497
498
499
500
501
502
503
504
505
506
507
/*
 * Final reclaim of an in-core inode: remove it from the per-AG cache so
 * no new lookup can find it, detach any attached quota state, and free
 * the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Delete the inode from the per-AG radix tree first, under the
	 * tree's write lock; after this no cache lookup can return it.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Cycle both inode locks exclusively around the dquot detach.
	 * NOTE(review): taking both locks exclusively here also appears
	 * to drain any thread that found this inode in the cache before
	 * the delete above and still holds a lock on it -- confirm.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
}
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563uint
564xfs_ilock_map_shared(
565 xfs_inode_t *ip)
566{
567 uint lock_mode;
568
569 if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
570 ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
571 lock_mode = XFS_ILOCK_EXCL;
572 } else {
573 lock_mode = XFS_ILOCK_SHARED;
574 }
575
576 xfs_ilock(ip, lock_mode);
577
578 return lock_mode;
579}
580
581
582
583
584
/*
 * Undo xfs_ilock_map_shared():  @lock_mode must be the value that
 * function returned.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
/*
 * Acquire the inode locks requested in @lock_flags, blocking until they
 * are obtained.  Valid flags are XFS_IOLOCK_SHARED/EXCL and
 * XFS_ILOCK_SHARED/EXCL, plus lockdep subclass bits covered by
 * XFS_LOCK_DEP_MASK.  The iolock is always taken before the ilock.
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * SHARED and EXCL of the same lock are mutually exclusive, and
	 * no bits outside the lock and lockdep masks are allowed.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	/* iolock first, then ilock -- this ordering must hold everywhere */
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
641
642
643
644
645
646
647
648
649
650
651
652
653
/*
 * Non-blocking variant of xfs_ilock().  @lock_flags takes the same
 * values.  Returns 1 if all requested locks were obtained and 0
 * otherwise; on failure, any lock already taken here is dropped again
 * so the inode is left exactly as it was found.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/* Same flag validity rules as the blocking lock path. */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

 out_undo_iolock:
	/* The ilock failed: release the iolock taken just above, if any. */
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
695
696
697
698
699
700
701
702
703
704
705
706
707
/*
 * Release the inode locks named in @lock_flags.  The flags take the
 * same values as for xfs_ilock(), plus XFS_IUNLOCK_NONOTIFY to suppress
 * the AIL notification below.  The caller must hold every lock it asks
 * to drop.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * No SHARED+EXCL of the same lock, no unknown bits, and at
	 * least one flag must be set.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Notify the AIL that the inode log item was unlocked,
		 * unless the caller asked us not to (NONOTIFY).
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}
748
749
750
751
752
/*
 * Demote the exclusive lock(s) in @lock_flags to shared mode.  Only
 * XFS_IOLOCK_EXCL and XFS_ILOCK_EXCL may be passed, and the caller must
 * hold those locks exclusively.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}
766
767#ifdef DEBUG
768
769
770
771
772
773
774
775int
776xfs_isilocked(
777 xfs_inode_t *ip,
778 uint lock_flags)
779{
780 if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
781 XFS_ILOCK_EXCL) {
782 if (!ip->i_lock.mr_writer)
783 return 0;
784 }
785
786 if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
787 XFS_IOLOCK_EXCL) {
788 if (!ip->i_iolock.mr_writer)
789 return 0;
790 }
791
792 return 1;
793}
794#endif
795
#ifdef XFS_INODE_TRACE

/*
 * Record one event in the inode's ktrace buffer: event tag @vk, caller
 * string @s (function or file name), @line, the current VFS reference
 * count, the passed and actual return addresses, and the cpu/pid of
 * the caller.
 */
#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/* 0 */				(void *)(__psint_t)(vk),		\
/* 1 */				(void *)(s),				\
/* 2 */				(void *)(__psint_t) line,		\
/* 3 */				(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/* 4 */				(void *)(ra),				\
/* 5 */				NULL,					\
/* 6 */				(void *)(__psint_t)current_cpu(),	\
/* 7 */				(void *)(__psint_t)current_pid(),	\
/* 8 */				(void *)__return_address,		\
/* 9 */				NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/* Trace entry into an inode operation. */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

/* Trace exit from an inode operation. */
void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

/* Trace an inode hold at the given file/line. */
void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

/* Trace an inode reference event at the given file/line. */
void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

/* Trace an inode release at the given file/line. */
void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif
844