#include <linux/log2.h>
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_dir2_priv.h"

kmem_zone_t *xfs_inode_zone;

#define XFS_ITRUNC_MAX_EXTENTS  2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
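
/*
 * Return the preferred extent size hint for allocations on this inode: the
 * per-inode extent size if the EXTSIZE flag is set, the realtime extent size
 * for realtime inodes, and zero otherwise.
 */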
xfs_extlen_t
xfs_get_extsz_hint(
        struct xfs_inode        *ip)
{
        if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
                return ip->i_d.di_extsize;
        if (XFS_IS_REALTIME_INODE(ip))
                return ip->i_mount->m_sb.sb_rextsize;
        return 0;
}

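/*
 * Return the copy-on-write extent size hint: the larger of the COWEXTSIZE
 * hint and the normal extent size hint, or the filesystem default if neither
 * is set.
 */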
xfs_extlen_t
xfs_get_cowextsz_hint(
        struct xfs_inode        *ip)
{
        xfs_extlen_t            a, b;

        a = 0;
        if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
                a = ip->i_d.di_cowextsize;
        b = xfs_get_extsz_hint(ip);

        a = max(a, b);
        if (a == 0)
                return XFS_DEFAULT_COWEXTSZ_HINT;
        return a;
}
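
/*
 * Lock the inode so the extent map of the data or attribute fork can be read.
 * If the fork is in btree format and the extents have not been read in yet,
 * take the ILOCK exclusively so the in-core extent list can be populated;
 * otherwise a shared lock is enough.  The lock mode actually taken is
 * returned so the caller can pass it back to xfs_iunlock().
 */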
uint
xfs_ilock_data_map_shared(
        struct xfs_inode        *ip)
{
        uint                    lock_mode = XFS_ILOCK_SHARED;

        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
            (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
                lock_mode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lock_mode);
        return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
        struct xfs_inode        *ip)
{
        uint                    lock_mode = XFS_ILOCK_SHARED;

        if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
            (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
                lock_mode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lock_mode);
        return lock_mode;
}
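
/*
 * Lock the inode with the lock type(s) requested in lock_flags.  Three locks
 * are involved: the VFS i_rwsem (XFS_IOLOCK), the mmap lock (XFS_MMAPLOCK)
 * and the inode metadata lock (XFS_ILOCK), and they are always taken in that
 * order.  Only one of the shared and exclusive variants of each lock may be
 * requested in a single call.
 */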
165void
166xfs_ilock(
167 xfs_inode_t *ip,
168 uint lock_flags)
169{
170 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
171
172
173
174
175
176
177 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
178 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
179 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
180 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
181 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
182 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
183 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
184
185 if (lock_flags & XFS_IOLOCK_EXCL) {
186 down_write_nested(&VFS_I(ip)->i_rwsem,
187 XFS_IOLOCK_DEP(lock_flags));
188 } else if (lock_flags & XFS_IOLOCK_SHARED) {
189 down_read_nested(&VFS_I(ip)->i_rwsem,
190 XFS_IOLOCK_DEP(lock_flags));
191 }
192
193 if (lock_flags & XFS_MMAPLOCK_EXCL)
194 mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
195 else if (lock_flags & XFS_MMAPLOCK_SHARED)
196 mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
197
198 if (lock_flags & XFS_ILOCK_EXCL)
199 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
200 else if (lock_flags & XFS_ILOCK_SHARED)
201 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
202}
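
/*
 * Trylock variant of xfs_ilock().  Returns 1 if all requested locks were
 * acquired; otherwise any locks already taken are dropped and 0 is returned.
 */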
216int
217xfs_ilock_nowait(
218 xfs_inode_t *ip,
219 uint lock_flags)
220{
221 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
222
223
224
225
226
227
228 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
229 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
230 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
231 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
232 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
233 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
234 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
235
236 if (lock_flags & XFS_IOLOCK_EXCL) {
237 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
238 goto out;
239 } else if (lock_flags & XFS_IOLOCK_SHARED) {
240 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
241 goto out;
242 }
243
244 if (lock_flags & XFS_MMAPLOCK_EXCL) {
245 if (!mrtryupdate(&ip->i_mmaplock))
246 goto out_undo_iolock;
247 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
248 if (!mrtryaccess(&ip->i_mmaplock))
249 goto out_undo_iolock;
250 }
251
252 if (lock_flags & XFS_ILOCK_EXCL) {
253 if (!mrtryupdate(&ip->i_lock))
254 goto out_undo_mmaplock;
255 } else if (lock_flags & XFS_ILOCK_SHARED) {
256 if (!mrtryaccess(&ip->i_lock))
257 goto out_undo_mmaplock;
258 }
259 return 1;
260
261out_undo_mmaplock:
262 if (lock_flags & XFS_MMAPLOCK_EXCL)
263 mrunlock_excl(&ip->i_mmaplock);
264 else if (lock_flags & XFS_MMAPLOCK_SHARED)
265 mrunlock_shared(&ip->i_mmaplock);
266out_undo_iolock:
267 if (lock_flags & XFS_IOLOCK_EXCL)
268 up_write(&VFS_I(ip)->i_rwsem);
269 else if (lock_flags & XFS_IOLOCK_SHARED)
270 up_read(&VFS_I(ip)->i_rwsem);
271out:
272 return 0;
273}
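
/*
 * Release the inode locks named in lock_flags.  The flags must match those
 * passed to the corresponding xfs_ilock() or xfs_ilock_nowait() call.
 */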
287void
288xfs_iunlock(
289 xfs_inode_t *ip,
290 uint lock_flags)
291{
292
293
294
295
296
297 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
298 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
299 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
300 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
301 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
302 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
303 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
304 ASSERT(lock_flags != 0);
305
306 if (lock_flags & XFS_IOLOCK_EXCL)
307 up_write(&VFS_I(ip)->i_rwsem);
308 else if (lock_flags & XFS_IOLOCK_SHARED)
309 up_read(&VFS_I(ip)->i_rwsem);
310
311 if (lock_flags & XFS_MMAPLOCK_EXCL)
312 mrunlock_excl(&ip->i_mmaplock);
313 else if (lock_flags & XFS_MMAPLOCK_SHARED)
314 mrunlock_shared(&ip->i_mmaplock);
315
316 if (lock_flags & XFS_ILOCK_EXCL)
317 mrunlock_excl(&ip->i_lock);
318 else if (lock_flags & XFS_ILOCK_SHARED)
319 mrunlock_shared(&ip->i_lock);
320
321 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
322}
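
/*
 * Demote the exclusive locks named in lock_flags to their shared form.
 */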
328void
329xfs_ilock_demote(
330 xfs_inode_t *ip,
331 uint lock_flags)
332{
333 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
334 ASSERT((lock_flags &
335 ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
336
337 if (lock_flags & XFS_ILOCK_EXCL)
338 mrdemote(&ip->i_lock);
339 if (lock_flags & XFS_MMAPLOCK_EXCL)
340 mrdemote(&ip->i_mmaplock);
341 if (lock_flags & XFS_IOLOCK_EXCL)
342 downgrade_write(&VFS_I(ip)->i_rwsem);
343
344 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
345}
346
347#if defined(DEBUG) || defined(XFS_WARN)
348int
349xfs_isilocked(
350 xfs_inode_t *ip,
351 uint lock_flags)
352{
353 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
354 if (!(lock_flags & XFS_ILOCK_SHARED))
355 return !!ip->i_lock.mr_writer;
356 return rwsem_is_locked(&ip->i_lock.mr_lock);
357 }
358
359 if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
360 if (!(lock_flags & XFS_MMAPLOCK_SHARED))
361 return !!ip->i_mmaplock.mr_writer;
362 return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
363 }
364
365 if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
366 if (!(lock_flags & XFS_IOLOCK_SHARED))
367 return !debug_locks ||
368 lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
369 return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
370 }
371
372 ASSERT(0);
373 return 0;
374}
375#endif
376
377
378
379
380
381
382
383#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
384static bool
385xfs_lockdep_subclass_ok(
386 int subclass)
387{
388 return subclass < MAX_LOCKDEP_SUBCLASSES;
389}
390#else
391#define xfs_lockdep_subclass_ok(subclass) (true)
392#endif
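
/*
 * Fold the lockdep subclass for the requested lock into the lock mode.  The
 * subclass encodes the inode's position in a multi-inode locking sequence so
 * lockdep can tell the nested acquisitions of the same lock class apart.
 */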
400static inline int
401xfs_lock_inumorder(int lock_mode, int subclass)
402{
403 int class = 0;
404
405 ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
406 XFS_ILOCK_RTSUM)));
407 ASSERT(xfs_lockdep_subclass_ok(subclass));
408
409 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
410 ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
411 class += subclass << XFS_IOLOCK_SHIFT;
412 }
413
414 if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
415 ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
416 class += subclass << XFS_MMAPLOCK_SHIFT;
417 }
418
419 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
420 ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
421 class += subclass << XFS_ILOCK_SHIFT;
422 }
423
424 return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
425}
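
/*
 * Lock up to five inodes with the same exclusive lock mode, in ascending
 * inode number order.  Once an already-locked inode is found to have a log
 * item in the AIL, switch to trylocks for the rest; if a trylock fails, drop
 * everything, back off briefly and start again, so we never block on a lock
 * while holding inodes the log might need flushed.
 */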
442static void
443xfs_lock_inodes(
444 xfs_inode_t **ips,
445 int inodes,
446 uint lock_mode)
447{
448 int attempts = 0, i, j, try_lock;
449 xfs_log_item_t *lp;
450
451
452
453
454
455
456
457
458 ASSERT(ips && inodes >= 2 && inodes <= 5);
459 ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
460 XFS_ILOCK_EXCL));
461 ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
462 XFS_ILOCK_SHARED)));
463 ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
464 inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
465 ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
466 inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
467
468 if (lock_mode & XFS_IOLOCK_EXCL) {
469 ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
470 } else if (lock_mode & XFS_MMAPLOCK_EXCL)
471 ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
472
473 try_lock = 0;
474 i = 0;
475again:
476 for (; i < inodes; i++) {
477 ASSERT(ips[i]);
478
479 if (i && (ips[i] == ips[i - 1]))
480 continue;
481
482
483
484
485
486 if (!try_lock) {
487 for (j = (i - 1); j >= 0 && !try_lock; j--) {
488 lp = (xfs_log_item_t *)ips[j]->i_itemp;
489 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
490 try_lock++;
491 }
492 }
493
494
495
496
497
498
499
500 if (!try_lock) {
501 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
502 continue;
503 }
504
505
506 ASSERT(i != 0);
507 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
508 continue;
509
510
511
512
513
514 attempts++;
515 for (j = i - 1; j >= 0; j--) {
516
517
518
519
520
521 if (j != (i - 1) && ips[j] == ips[j + 1])
522 continue;
523
524 xfs_iunlock(ips[j], lock_mode);
525 }
526
527 if ((attempts % 5) == 0) {
528 delay(1);
529 }
530 i = 0;
531 try_lock = 0;
532 goto again;
533 }
534}
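
/*
 * Lock two inodes in inode number order, with possibly different exclusive
 * lock modes.  If the first inode has a log item in the AIL, the second is
 * only trylocked; on failure both are dropped and the sequence is retried,
 * mirroring the deadlock avoidance in xfs_lock_inodes().
 */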
544void
545xfs_lock_two_inodes(
546 struct xfs_inode *ip0,
547 uint ip0_mode,
548 struct xfs_inode *ip1,
549 uint ip1_mode)
550{
551 struct xfs_inode *temp;
552 uint mode_temp;
553 int attempts = 0;
554 xfs_log_item_t *lp;
555
556 ASSERT(hweight32(ip0_mode) == 1);
557 ASSERT(hweight32(ip1_mode) == 1);
558 ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
559 ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
560 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
561 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
562 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
563 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
564 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
565 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
566 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
567 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
568
569 ASSERT(ip0->i_ino != ip1->i_ino);
570
571 if (ip0->i_ino > ip1->i_ino) {
572 temp = ip0;
573 ip0 = ip1;
574 ip1 = temp;
575 mode_temp = ip0_mode;
576 ip0_mode = ip1_mode;
577 ip1_mode = mode_temp;
578 }
579
580 again:
581 xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
582
583
584
585
586
587
588 lp = (xfs_log_item_t *)ip0->i_itemp;
589 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
590 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
591 xfs_iunlock(ip0, ip0_mode);
592 if ((++attempts % 5) == 0)
593 delay(1);
594 goto again;
595 }
596 } else {
597 xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
598 }
599}
600
601void
602__xfs_iflock(
603 struct xfs_inode *ip)
604{
605 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
606 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
607
608 do {
609 prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
610 if (xfs_isiflocked(ip))
611 io_schedule();
612 } while (!xfs_iflock_nowait(ip));
613
614 finish_wait(wq, &wait.wq_entry);
615}
616
617STATIC uint
618_xfs_dic2xflags(
619 uint16_t di_flags,
620 uint64_t di_flags2,
621 bool has_attr)
622{
623 uint flags = 0;
624
625 if (di_flags & XFS_DIFLAG_ANY) {
626 if (di_flags & XFS_DIFLAG_REALTIME)
627 flags |= FS_XFLAG_REALTIME;
628 if (di_flags & XFS_DIFLAG_PREALLOC)
629 flags |= FS_XFLAG_PREALLOC;
630 if (di_flags & XFS_DIFLAG_IMMUTABLE)
631 flags |= FS_XFLAG_IMMUTABLE;
632 if (di_flags & XFS_DIFLAG_APPEND)
633 flags |= FS_XFLAG_APPEND;
634 if (di_flags & XFS_DIFLAG_SYNC)
635 flags |= FS_XFLAG_SYNC;
636 if (di_flags & XFS_DIFLAG_NOATIME)
637 flags |= FS_XFLAG_NOATIME;
638 if (di_flags & XFS_DIFLAG_NODUMP)
639 flags |= FS_XFLAG_NODUMP;
640 if (di_flags & XFS_DIFLAG_RTINHERIT)
641 flags |= FS_XFLAG_RTINHERIT;
642 if (di_flags & XFS_DIFLAG_PROJINHERIT)
643 flags |= FS_XFLAG_PROJINHERIT;
644 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
645 flags |= FS_XFLAG_NOSYMLINKS;
646 if (di_flags & XFS_DIFLAG_EXTSIZE)
647 flags |= FS_XFLAG_EXTSIZE;
648 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
649 flags |= FS_XFLAG_EXTSZINHERIT;
650 if (di_flags & XFS_DIFLAG_NODEFRAG)
651 flags |= FS_XFLAG_NODEFRAG;
652 if (di_flags & XFS_DIFLAG_FILESTREAM)
653 flags |= FS_XFLAG_FILESTREAM;
654 }
655
656 if (di_flags2 & XFS_DIFLAG2_ANY) {
657 if (di_flags2 & XFS_DIFLAG2_DAX)
658 flags |= FS_XFLAG_DAX;
659 if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
660 flags |= FS_XFLAG_COWEXTSIZE;
661 }
662
663 if (has_attr)
664 flags |= FS_XFLAG_HASATTR;
665
666 return flags;
667}
668
669uint
670xfs_ip2xflags(
671 struct xfs_inode *ip)
672{
673 struct xfs_icdinode *dic = &ip->i_d;
674
675 return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
676}
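
/*
 * Look up name in directory dp and return the corresponding referenced incore
 * inode in *ipp.  ci_name, if supplied, receives the actual name found by a
 * case-insensitive match and must be freed by the caller.
 */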
684int
685xfs_lookup(
686 xfs_inode_t *dp,
687 struct xfs_name *name,
688 xfs_inode_t **ipp,
689 struct xfs_name *ci_name)
690{
691 xfs_ino_t inum;
692 int error;
693
694 trace_xfs_lookup(dp, name);
695
696 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
697 return -EIO;
698
699 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
700 if (error)
701 goto out_unlock;
702
703 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
704 if (error)
705 goto out_free_name;
706
707 return 0;
708
709out_free_name:
710 if (ci_name)
711 kmem_free(ci_name->name);
712out_unlock:
713 *ipp = NULL;
714 return error;
715}
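
/*
 * Allocate a new on-disk inode and return a locked, referenced incore inode
 * for it.  The mode, link count, device number, project id and inheritable
 * flags are initialised from the arguments and the parent inode.  If the
 * on-disk allocator needs the current transaction committed before it can
 * provide a free inode, *ialloc_context is set and the caller must roll the
 * transaction and call xfs_ialloc() again (see xfs_dir_ialloc()).
 */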
748static int
749xfs_ialloc(
750 xfs_trans_t *tp,
751 xfs_inode_t *pip,
752 umode_t mode,
753 xfs_nlink_t nlink,
754 dev_t rdev,
755 prid_t prid,
756 xfs_buf_t **ialloc_context,
757 xfs_inode_t **ipp)
758{
759 struct xfs_mount *mp = tp->t_mountp;
760 xfs_ino_t ino;
761 xfs_inode_t *ip;
762 uint flags;
763 int error;
764 struct timespec64 tv;
765 struct inode *inode;
766
767
768
769
770
771 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
772 ialloc_context, &ino);
773 if (error)
774 return error;
775 if (*ialloc_context || ino == NULLFSINO) {
776 *ipp = NULL;
777 return 0;
778 }
779 ASSERT(*ialloc_context == NULL);
780
781
782
783
784
785
786
787
788 if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
789 xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
790 return -EFSCORRUPTED;
791 }
792
793
794
795
796
797
798 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
799 XFS_ILOCK_EXCL, &ip);
800 if (error)
801 return error;
802 ASSERT(ip != NULL);
803 inode = VFS_I(ip);
804
805
806
807
808
809
810 if (ip->i_d.di_version == 1)
811 ip->i_d.di_version = 2;
812
813 inode->i_mode = mode;
814 set_nlink(inode, nlink);
815 ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
816 ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
817 inode->i_rdev = rdev;
818 xfs_set_projid(ip, prid);
819
820 if (pip && XFS_INHERIT_GID(pip)) {
821 ip->i_d.di_gid = pip->i_d.di_gid;
822 if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
823 inode->i_mode |= S_ISGID;
824 }
825
826
827
828
829
830
831 if ((irix_sgid_inherit) &&
832 (inode->i_mode & S_ISGID) &&
833 (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
834 inode->i_mode &= ~S_ISGID;
835
836 ip->i_d.di_size = 0;
837 ip->i_d.di_nextents = 0;
838 ASSERT(ip->i_d.di_nblocks == 0);
839
840 tv = current_time(inode);
841 inode->i_mtime = tv;
842 inode->i_atime = tv;
843 inode->i_ctime = tv;
844
845 ip->i_d.di_extsize = 0;
846 ip->i_d.di_dmevmask = 0;
847 ip->i_d.di_dmstate = 0;
848 ip->i_d.di_flags = 0;
849
850 if (ip->i_d.di_version == 3) {
851 inode_set_iversion(inode, 1);
852 ip->i_d.di_flags2 = 0;
853 ip->i_d.di_cowextsize = 0;
854 ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
855 ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
856 }
857
858
859 flags = XFS_ILOG_CORE;
860 switch (mode & S_IFMT) {
861 case S_IFIFO:
862 case S_IFCHR:
863 case S_IFBLK:
864 case S_IFSOCK:
865 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
866 ip->i_df.if_flags = 0;
867 flags |= XFS_ILOG_DEV;
868 break;
869 case S_IFREG:
870 case S_IFDIR:
871 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
872 uint di_flags = 0;
873
874 if (S_ISDIR(mode)) {
875 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
876 di_flags |= XFS_DIFLAG_RTINHERIT;
877 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
878 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
879 ip->i_d.di_extsize = pip->i_d.di_extsize;
880 }
881 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
882 di_flags |= XFS_DIFLAG_PROJINHERIT;
883 } else if (S_ISREG(mode)) {
884 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
885 di_flags |= XFS_DIFLAG_REALTIME;
886 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
887 di_flags |= XFS_DIFLAG_EXTSIZE;
888 ip->i_d.di_extsize = pip->i_d.di_extsize;
889 }
890 }
891 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
892 xfs_inherit_noatime)
893 di_flags |= XFS_DIFLAG_NOATIME;
894 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
895 xfs_inherit_nodump)
896 di_flags |= XFS_DIFLAG_NODUMP;
897 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
898 xfs_inherit_sync)
899 di_flags |= XFS_DIFLAG_SYNC;
900 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
901 xfs_inherit_nosymlinks)
902 di_flags |= XFS_DIFLAG_NOSYMLINKS;
903 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
904 xfs_inherit_nodefrag)
905 di_flags |= XFS_DIFLAG_NODEFRAG;
906 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
907 di_flags |= XFS_DIFLAG_FILESTREAM;
908
909 ip->i_d.di_flags |= di_flags;
910 }
911 if (pip &&
912 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
913 pip->i_d.di_version == 3 &&
914 ip->i_d.di_version == 3) {
915 uint64_t di_flags2 = 0;
916
917 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
918 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
919 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
920 }
921 if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
922 di_flags2 |= XFS_DIFLAG2_DAX;
923
924 ip->i_d.di_flags2 |= di_flags2;
925 }
926
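                /* FALLTHROUGH: regular files and directories also use the extent format */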
927 case S_IFLNK:
928 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
929 ip->i_df.if_flags = XFS_IFEXTENTS;
930 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
931 ip->i_df.if_u1.if_root = NULL;
932 break;
933 default:
934 ASSERT(0);
935 }
936
937
938
939 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
940 ip->i_d.di_anextents = 0;
941
942
943
944
945 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
946 xfs_trans_log_inode(tp, ip, flags);
947
948
949 xfs_setup_inode(ip);
950
951 *ipp = ip;
952 return 0;
953}
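
/*
 * Wrapper around xfs_ialloc() that handles the transaction juggling: if the
 * inode allocator dirtied the AGI and asked for a commit, hold the buffer,
 * roll the transaction (preserving any dquot state attached to it), rejoin
 * the buffer and call xfs_ialloc() a second time to finish initialising the
 * inode.
 */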
965int
966xfs_dir_ialloc(
967 xfs_trans_t **tpp,
968
969 xfs_inode_t *dp,
970
971 umode_t mode,
972 xfs_nlink_t nlink,
973 dev_t rdev,
974 prid_t prid,
975 xfs_inode_t **ipp)
976
977{
978 xfs_trans_t *tp;
979 xfs_inode_t *ip;
980 xfs_buf_t *ialloc_context = NULL;
981 int code;
982 void *dqinfo;
983 uint tflags;
984
985 tp = *tpp;
986 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
1004 &ip);
1005
1006
1007
1008
1009
1010
1011 if (code) {
1012 *ipp = NULL;
1013 return code;
1014 }
1015 if (!ialloc_context && !ip) {
1016 *ipp = NULL;
1017 return -ENOSPC;
1018 }
1019
1020
1021
1022
1023
1024
1025
1026 if (ialloc_context) {
1027
1028
1029
1030
1031
1032
1033
1034 xfs_trans_bhold(tp, ialloc_context);
1035
1036
1037
1038
1039
1040
1041 dqinfo = NULL;
1042 tflags = 0;
1043 if (tp->t_dqinfo) {
1044 dqinfo = (void *)tp->t_dqinfo;
1045 tp->t_dqinfo = NULL;
1046 tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1047 tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1048 }
1049
1050 code = xfs_trans_roll(&tp);
1051
1052
1053
1054
1055 if (dqinfo) {
1056 tp->t_dqinfo = dqinfo;
1057 tp->t_flags |= tflags;
1058 }
1059
1060 if (code) {
1061 xfs_buf_relse(ialloc_context);
1062 *tpp = tp;
1063 *ipp = NULL;
1064 return code;
1065 }
1066 xfs_trans_bjoin(tp, ialloc_context);
1067
1068
1069
1070
1071
1072
1073 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1074 &ialloc_context, &ip);
1075
1076
1077
1078
1079
1080 if (code) {
1081 *tpp = tp;
1082 *ipp = NULL;
1083 return code;
1084 }
1085 ASSERT(!ialloc_context && ip);
1086
1087 }
1088
1089 *ipp = ip;
1090 *tpp = tp;
1091
1092 return 0;
1093}
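
/*
 * Decrement the link count on an inode and log the change.  If the count
 * drops to zero, put the inode on the AGI unlinked list so recovery can
 * finish freeing it if we crash before the last reference goes away.
 */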
1100static int
1101xfs_droplink(
1102 xfs_trans_t *tp,
1103 xfs_inode_t *ip)
1104{
1105 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1106
1107 drop_nlink(VFS_I(ip));
1108 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1109
1110 if (VFS_I(ip)->i_nlink)
1111 return 0;
1112
1113 return xfs_iunlink(tp, ip);
1114}
1115
1116
1117
1118
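
/*
 * Increment the link count on an inode and log the change.
 */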
1119static int
1120xfs_bumplink(
1121 xfs_trans_t *tp,
1122 xfs_inode_t *ip)
1123{
1124 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1125
1126 ASSERT(ip->i_d.di_version > 1);
1127 inc_nlink(VFS_I(ip));
1128 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1129 return 0;
1130}
1131
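
/*
 * Create a new file or directory named name in directory dp.  The inode and
 * the directory entry are allocated under one permanent transaction
 * reservation, with quota reservations taken up front; directories also get
 * "." and ".." initialised and bump the parent's link count.
 */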
1132int
1133xfs_create(
1134 xfs_inode_t *dp,
1135 struct xfs_name *name,
1136 umode_t mode,
1137 dev_t rdev,
1138 xfs_inode_t **ipp)
1139{
1140 int is_dir = S_ISDIR(mode);
1141 struct xfs_mount *mp = dp->i_mount;
1142 struct xfs_inode *ip = NULL;
1143 struct xfs_trans *tp = NULL;
1144 int error;
1145 struct xfs_defer_ops dfops;
1146 xfs_fsblock_t first_block;
1147 bool unlock_dp_on_error = false;
1148 prid_t prid;
1149 struct xfs_dquot *udqp = NULL;
1150 struct xfs_dquot *gdqp = NULL;
1151 struct xfs_dquot *pdqp = NULL;
1152 struct xfs_trans_res *tres;
1153 uint resblks;
1154
1155 trace_xfs_create(dp, name);
1156
1157 if (XFS_FORCED_SHUTDOWN(mp))
1158 return -EIO;
1159
1160 prid = xfs_get_initial_prid(dp);
1161
1162
1163
1164
1165 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1166 xfs_kgid_to_gid(current_fsgid()), prid,
1167 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1168 &udqp, &gdqp, &pdqp);
1169 if (error)
1170 return error;
1171
1172 if (is_dir) {
1173 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1174 tres = &M_RES(mp)->tr_mkdir;
1175 } else {
1176 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1177 tres = &M_RES(mp)->tr_create;
1178 }
1179
1180
1181
1182
1183
1184
1185
1186 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1187 if (error == -ENOSPC) {
1188
1189 xfs_flush_inodes(mp);
1190 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1191 }
1192 if (error)
1193 goto out_release_inode;
1194
1195 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1196 unlock_dp_on_error = true;
1197
1198 xfs_defer_init(&dfops, &first_block);
1199 tp->t_agfl_dfops = &dfops;
1200
1201
1202
1203
1204 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1205 pdqp, resblks, 1, 0);
1206 if (error)
1207 goto out_trans_cancel;
1208
1209
1210
1211
1212
1213
1214 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1215 if (error)
1216 goto out_trans_cancel;
1217
1218
1219
1220
1221
1222
1223
1224
1225 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1226 unlock_dp_on_error = false;
1227
1228 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1229 &first_block, &dfops, resblks ?
1230 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1231 if (error) {
1232 ASSERT(error != -ENOSPC);
1233 goto out_trans_cancel;
1234 }
1235 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1236 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1237
1238 if (is_dir) {
1239 error = xfs_dir_init(tp, ip, dp);
1240 if (error)
1241 goto out_bmap_cancel;
1242
1243 error = xfs_bumplink(tp, dp);
1244 if (error)
1245 goto out_bmap_cancel;
1246 }
1247
1248
1249
1250
1251
1252
1253 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1254 xfs_trans_set_sync(tp);
1255
1256
1257
1258
1259
1260
1261 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1262
1263 error = xfs_defer_finish(&tp, &dfops);
1264 if (error)
1265 goto out_bmap_cancel;
1266
1267 error = xfs_trans_commit(tp);
1268 if (error)
1269 goto out_release_inode;
1270
1271 xfs_qm_dqrele(udqp);
1272 xfs_qm_dqrele(gdqp);
1273 xfs_qm_dqrele(pdqp);
1274
1275 *ipp = ip;
1276 return 0;
1277
1278 out_bmap_cancel:
1279 xfs_defer_cancel(&dfops);
1280 out_trans_cancel:
1281 xfs_trans_cancel(tp);
1282 out_release_inode:
1283
1284
1285
1286
1287
1288 if (ip) {
1289 xfs_finish_inode_setup(ip);
1290 IRELE(ip);
1291 }
1292
1293 xfs_qm_dqrele(udqp);
1294 xfs_qm_dqrele(gdqp);
1295 xfs_qm_dqrele(pdqp);
1296
1297 if (unlock_dp_on_error)
1298 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1299 return error;
1300}
1301
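
/*
 * Create an unnamed (O_TMPFILE style) inode under dp.  The new inode is put
 * straight onto the AGI unlinked list so it is reclaimed automatically if it
 * is never linked into the namespace.
 */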
1302int
1303xfs_create_tmpfile(
1304 struct xfs_inode *dp,
1305 umode_t mode,
1306 struct xfs_inode **ipp)
1307{
1308 struct xfs_mount *mp = dp->i_mount;
1309 struct xfs_inode *ip = NULL;
1310 struct xfs_trans *tp = NULL;
1311 int error;
1312 prid_t prid;
1313 struct xfs_dquot *udqp = NULL;
1314 struct xfs_dquot *gdqp = NULL;
1315 struct xfs_dquot *pdqp = NULL;
1316 struct xfs_trans_res *tres;
1317 uint resblks;
1318
1319 if (XFS_FORCED_SHUTDOWN(mp))
1320 return -EIO;
1321
1322 prid = xfs_get_initial_prid(dp);
1323
1324
1325
1326
1327 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1328 xfs_kgid_to_gid(current_fsgid()), prid,
1329 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1330 &udqp, &gdqp, &pdqp);
1331 if (error)
1332 return error;
1333
1334 resblks = XFS_IALLOC_SPACE_RES(mp);
1335 tres = &M_RES(mp)->tr_create_tmpfile;
1336
1337 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1338 if (error)
1339 goto out_release_inode;
1340
1341 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1342 pdqp, resblks, 1, 0);
1343 if (error)
1344 goto out_trans_cancel;
1345
1346 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
1347 if (error)
1348 goto out_trans_cancel;
1349
1350 if (mp->m_flags & XFS_MOUNT_WSYNC)
1351 xfs_trans_set_sync(tp);
1352
1353
1354
1355
1356
1357
1358 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1359
1360 error = xfs_iunlink(tp, ip);
1361 if (error)
1362 goto out_trans_cancel;
1363
1364 error = xfs_trans_commit(tp);
1365 if (error)
1366 goto out_release_inode;
1367
1368 xfs_qm_dqrele(udqp);
1369 xfs_qm_dqrele(gdqp);
1370 xfs_qm_dqrele(pdqp);
1371
1372 *ipp = ip;
1373 return 0;
1374
1375 out_trans_cancel:
1376 xfs_trans_cancel(tp);
1377 out_release_inode:
1378
1379
1380
1381
1382
1383 if (ip) {
1384 xfs_finish_inode_setup(ip);
1385 IRELE(ip);
1386 }
1387
1388 xfs_qm_dqrele(udqp);
1389 xfs_qm_dqrele(gdqp);
1390 xfs_qm_dqrele(pdqp);
1391
1392 return error;
1393}
1394
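
/*
 * Link the existing inode sip into directory tdp under target_name, bumping
 * its link count.  If sip currently has a zero link count (a tmpfile being
 * given a name), it is removed from the unlinked list first.
 */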
1395int
1396xfs_link(
1397 xfs_inode_t *tdp,
1398 xfs_inode_t *sip,
1399 struct xfs_name *target_name)
1400{
1401 xfs_mount_t *mp = tdp->i_mount;
1402 xfs_trans_t *tp;
1403 int error;
1404 struct xfs_defer_ops dfops;
1405 xfs_fsblock_t first_block;
1406 int resblks;
1407
1408 trace_xfs_link(tdp, target_name);
1409
1410 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1411
1412 if (XFS_FORCED_SHUTDOWN(mp))
1413 return -EIO;
1414
1415 error = xfs_qm_dqattach(sip);
1416 if (error)
1417 goto std_return;
1418
1419 error = xfs_qm_dqattach(tdp);
1420 if (error)
1421 goto std_return;
1422
1423 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1424 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1425 if (error == -ENOSPC) {
1426 resblks = 0;
1427 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1428 }
1429 if (error)
1430 goto std_return;
1431
1432 xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1433
1434 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1435 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1436
1437
1438
1439
1440
1441
1442 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1443 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
1444 error = -EXDEV;
1445 goto error_return;
1446 }
1447
1448 if (!resblks) {
1449 error = xfs_dir_canenter(tp, tdp, target_name);
1450 if (error)
1451 goto error_return;
1452 }
1453
1454 xfs_defer_init(&dfops, &first_block);
1455 tp->t_agfl_dfops = &dfops;
1456
1457
1458
1459
1460 if (VFS_I(sip)->i_nlink == 0) {
1461 error = xfs_iunlink_remove(tp, sip);
1462 if (error)
1463 goto error_return;
1464 }
1465
1466 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1467 &first_block, &dfops, resblks);
1468 if (error)
1469 goto error_return;
1470 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1471 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1472
1473 error = xfs_bumplink(tp, sip);
1474 if (error)
1475 goto error_return;
1476
1477
1478
1479
1480
1481
1482 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1483 xfs_trans_set_sync(tp);
1484
1485 error = xfs_defer_finish(&tp, &dfops);
1486 if (error) {
1487 xfs_defer_cancel(&dfops);
1488 goto error_return;
1489 }
1490
1491 return xfs_trans_commit(tp);
1492
1493 error_return:
1494 xfs_trans_cancel(tp);
1495 std_return:
1496 return error;
1497}
1498
1499
1500static void
1501xfs_itruncate_clear_reflink_flags(
1502 struct xfs_inode *ip)
1503{
1504 struct xfs_ifork *dfork;
1505 struct xfs_ifork *cfork;
1506
1507 if (!xfs_is_reflink_inode(ip))
1508 return;
1509 dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1510 cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1511 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1512 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1513 if (cfork->if_bytes == 0)
1514 xfs_inode_clear_cowblocks_tag(ip);
1515}
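
/*
 * Unmap and free all blocks in the given fork from new_size to EOF.  The
 * unmap is done in batches of XFS_ITRUNC_MAX_EXTENTS extents, rolling the
 * transaction between batches so each step fits within its log reservation.
 * For the data fork, copy-on-write extents beyond the new size are cancelled
 * as well.  The caller must hold the ILOCK exclusively.
 */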
1538int
1539xfs_itruncate_extents_flags(
1540 struct xfs_trans **tpp,
1541 struct xfs_inode *ip,
1542 int whichfork,
1543 xfs_fsize_t new_size,
1544 int flags)
1545{
1546 struct xfs_mount *mp = ip->i_mount;
1547 struct xfs_trans *tp = *tpp;
1548 struct xfs_defer_ops dfops;
1549 xfs_fsblock_t first_block;
1550 xfs_fileoff_t first_unmap_block;
1551 xfs_fileoff_t last_block;
1552 xfs_filblks_t unmap_len;
1553 int error = 0;
1554 int done = 0;
1555
1556 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1557 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1558 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1559 ASSERT(new_size <= XFS_ISIZE(ip));
1560 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1561 ASSERT(ip->i_itemp != NULL);
1562 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1563 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1564
1565 trace_xfs_itruncate_extents_start(ip, new_size);
1566
1567 flags |= xfs_bmapi_aflag(whichfork);
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1579 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1580 if (first_unmap_block == last_block)
1581 return 0;
1582
1583 ASSERT(first_unmap_block < last_block);
1584 unmap_len = last_block - first_unmap_block + 1;
1585 while (!done) {
1586 xfs_defer_init(&dfops, &first_block);
1587 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
1588 XFS_ITRUNC_MAX_EXTENTS, &first_block,
1589 &dfops, &done);
1590 if (error)
1591 goto out_bmap_cancel;
1592
1593
1594
1595
1596
1597 xfs_defer_ijoin(&dfops, ip);
1598 error = xfs_defer_finish(&tp, &dfops);
1599 if (error)
1600 goto out_bmap_cancel;
1601
1602 error = xfs_trans_roll_inode(&tp, ip);
1603 if (error)
1604 goto out;
1605 }
1606
1607 if (whichfork == XFS_DATA_FORK) {
1608
1609 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1610 first_unmap_block, last_block, true);
1611 if (error)
1612 goto out;
1613
1614 xfs_itruncate_clear_reflink_flags(ip);
1615 }
1616
1617
1618
1619
1620
1621 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1622
1623 trace_xfs_itruncate_extents_end(ip, new_size);
1624
1625out:
1626 *tpp = tp;
1627 return error;
1628out_bmap_cancel:
1629
1630
1631
1632
1633
1634 xfs_defer_cancel(&dfops);
1635 goto out;
1636}
1637
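
/*
 * Called when the last user reference to a file is dropped: flush delayed
 * allocations left behind by a truncate and opportunistically trim
 * speculative preallocation beyond EOF for linked regular files.
 */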
1638int
1639xfs_release(
1640 xfs_inode_t *ip)
1641{
1642 xfs_mount_t *mp = ip->i_mount;
1643 int error;
1644
1645 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1646 return 0;
1647
1648
1649 if (mp->m_flags & XFS_MOUNT_RDONLY)
1650 return 0;
1651
1652 if (!XFS_FORCED_SHUTDOWN(mp)) {
1653 int truncated;
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1666 if (truncated) {
1667 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1668 if (ip->i_delayed_blks > 0) {
1669 error = filemap_flush(VFS_I(ip)->i_mapping);
1670 if (error)
1671 return error;
1672 }
1673 }
1674 }
1675
1676 if (VFS_I(ip)->i_nlink == 0)
1677 return 0;
1678
1679 if (xfs_can_free_eofblocks(ip, false)) {
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1696 return 0;
1697
1698
1699
1700
1701
1702
1703
1704 if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1705 error = xfs_free_eofblocks(ip);
1706 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1707 if (error)
1708 return error;
1709 }
1710
1711
1712 if (ip->i_delayed_blks)
1713 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1714 }
1715 return 0;
1716}
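
/*
 * xfs_inactive() helper: truncate the data fork of an inode that is being
 * freed down to zero length.
 */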
1723STATIC int
1724xfs_inactive_truncate(
1725 struct xfs_inode *ip)
1726{
1727 struct xfs_mount *mp = ip->i_mount;
1728 struct xfs_trans *tp;
1729 int error;
1730
1731 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1732 if (error) {
1733 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1734 return error;
1735 }
1736
1737 xfs_ilock(ip, XFS_ILOCK_EXCL);
1738 xfs_trans_ijoin(tp, ip, 0);
1739
1740
1741
1742
1743
1744
1745 ip->i_d.di_size = 0;
1746 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1747
1748 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1749 if (error)
1750 goto error_trans_cancel;
1751
1752 ASSERT(ip->i_d.di_nextents == 0);
1753
1754 error = xfs_trans_commit(tp);
1755 if (error)
1756 goto error_unlock;
1757
1758 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1759 return 0;
1760
1761error_trans_cancel:
1762 xfs_trans_cancel(tp);
1763error_unlock:
1764 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1765 return error;
1766}
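
/*
 * xfs_inactive() helper: free the on-disk inode.  If a previous inode btree
 * block allocation failure has been recorded (m_inotbt_nores), take a block
 * reservation and allow use of the reserve pool so the inode btrees can
 * still be expanded.
 */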
1773STATIC int
1774xfs_inactive_ifree(
1775 struct xfs_inode *ip)
1776{
1777 struct xfs_defer_ops dfops;
1778 xfs_fsblock_t first_block;
1779 struct xfs_mount *mp = ip->i_mount;
1780 struct xfs_trans *tp;
1781 int error;
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794 if (unlikely(mp->m_inotbt_nores)) {
1795 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1796 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1797 &tp);
1798 } else {
1799 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1800 }
1801 if (error) {
1802 if (error == -ENOSPC) {
1803 xfs_warn_ratelimited(mp,
1804 "Failed to remove inode(s) from unlinked list. "
1805 "Please free space, unmount and run xfs_repair.");
1806 } else {
1807 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1808 }
1809 return error;
1810 }
1811
1812 xfs_ilock(ip, XFS_ILOCK_EXCL);
1813 xfs_trans_ijoin(tp, ip, 0);
1814
1815 xfs_defer_init(&dfops, &first_block);
1816 tp->t_agfl_dfops = &dfops;
1817 error = xfs_ifree(tp, ip, &dfops);
1818 if (error) {
1819
1820
1821
1822
1823
1824 if (!XFS_FORCED_SHUTDOWN(mp)) {
1825 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1826 __func__, error);
1827 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1828 }
1829 xfs_trans_cancel(tp);
1830 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1831 return error;
1832 }
1833
1834
1835
1836
1837 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1838
1839
1840
1841
1842
1843 error = xfs_defer_finish(&tp, &dfops);
1844 if (error) {
1845 xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
1846 __func__, error);
1847 xfs_defer_cancel(&dfops);
1848 }
1849 error = xfs_trans_commit(tp);
1850 if (error)
1851 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1852 __func__, error);
1853
1854 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1855 return 0;
1856}
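
/*
 * Called when the VFS is finished with an inode.  Linked inodes only get
 * post-EOF blocks trimmed; unlinked inodes have the data fork truncated, the
 * attribute fork torn down, the on-disk inode freed and the dquots detached.
 */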
1866void
1867xfs_inactive(
1868 xfs_inode_t *ip)
1869{
1870 struct xfs_mount *mp;
1871 struct xfs_ifork *cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1872 int error;
1873 int truncate = 0;
1874
1875
1876
1877
1878
1879 if (VFS_I(ip)->i_mode == 0) {
1880 ASSERT(ip->i_df.if_real_bytes == 0);
1881 ASSERT(ip->i_df.if_broot_bytes == 0);
1882 return;
1883 }
1884
1885 mp = ip->i_mount;
1886 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1887
1888
1889 if (mp->m_flags & XFS_MOUNT_RDONLY)
1890 return;
1891
1892
1893 if (xfs_is_reflink_inode(ip) && cow_ifp->if_bytes > 0)
1894 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1895
1896 if (VFS_I(ip)->i_nlink != 0) {
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906 if (xfs_can_free_eofblocks(ip, true))
1907 xfs_free_eofblocks(ip);
1908
1909 return;
1910 }
1911
1912 if (S_ISREG(VFS_I(ip)->i_mode) &&
1913 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1914 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1915 truncate = 1;
1916
1917 error = xfs_qm_dqattach(ip);
1918 if (error)
1919 return;
1920
1921 if (S_ISLNK(VFS_I(ip)->i_mode))
1922 error = xfs_inactive_symlink(ip);
1923 else if (truncate)
1924 error = xfs_inactive_truncate(ip);
1925 if (error)
1926 return;
1927
1928
1929
1930
1931
1932
1933 if (XFS_IFORK_Q(ip)) {
1934 error = xfs_attr_inactive(ip);
1935 if (error)
1936 return;
1937 }
1938
1939 ASSERT(!ip->i_afp);
1940 ASSERT(ip->i_d.di_anextents == 0);
1941 ASSERT(ip->i_d.di_forkoff == 0);
1942
1943
1944
1945
1946 error = xfs_inactive_ifree(ip);
1947 if (error)
1948 return;
1949
1950
1951
1952
1953 xfs_qm_dqdetach(ip);
1954}
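
/*
 * Add the inode to the head of its AGI unlinked-list bucket.  The bucket is
 * indexed by the AG inode number modulo XFS_AGI_UNLINKED_BUCKETS; the
 * inode's di_next_unlinked field is pointed at the old head and the AGI
 * bucket is updated to point at this inode.
 */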
1966STATIC int
1967xfs_iunlink(
1968 struct xfs_trans *tp,
1969 struct xfs_inode *ip)
1970{
1971 xfs_mount_t *mp = tp->t_mountp;
1972 xfs_agi_t *agi;
1973 xfs_dinode_t *dip;
1974 xfs_buf_t *agibp;
1975 xfs_buf_t *ibp;
1976 xfs_agino_t agino;
1977 short bucket_index;
1978 int offset;
1979 int error;
1980
1981 ASSERT(VFS_I(ip)->i_mode != 0);
1982
1983
1984
1985
1986
1987 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1988 if (error)
1989 return error;
1990 agi = XFS_BUF_TO_AGI(agibp);
1991
1992
1993
1994
1995
1996 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1997 ASSERT(agino != 0);
1998 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1999 ASSERT(agi->agi_unlinked[bucket_index]);
2000 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
2001
2002 if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
2003
2004
2005
2006
2007
2008
2009 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2010 0, 0);
2011 if (error)
2012 return error;
2013
2014 ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
2015 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
2016 offset = ip->i_imap.im_boffset +
2017 offsetof(xfs_dinode_t, di_next_unlinked);
2018
2019
2020 xfs_dinode_calc_crc(mp, dip);
2021
2022 xfs_trans_inode_buf(tp, ibp);
2023 xfs_trans_log_buf(tp, ibp, offset,
2024 (offset + sizeof(xfs_agino_t) - 1));
2025 xfs_inobp_check(mp, ibp);
2026 }
2027
2028
2029
2030
2031 ASSERT(agino != 0);
2032 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
2033 offset = offsetof(xfs_agi_t, agi_unlinked) +
2034 (sizeof(xfs_agino_t) * bucket_index);
2035 xfs_trans_log_buf(tp, agibp, offset,
2036 (offset + sizeof(xfs_agino_t) - 1));
2037 return 0;
2038}
2039
2040
2041
2042
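
/*
 * Remove the inode from its AGI unlinked-list bucket, either by updating the
 * bucket head directly or by walking the list and rewriting the previous
 * inode's di_next_unlinked pointer.
 */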
2043STATIC int
2044xfs_iunlink_remove(
2045 xfs_trans_t *tp,
2046 xfs_inode_t *ip)
2047{
2048 xfs_ino_t next_ino;
2049 xfs_mount_t *mp;
2050 xfs_agi_t *agi;
2051 xfs_dinode_t *dip;
2052 xfs_buf_t *agibp;
2053 xfs_buf_t *ibp;
2054 xfs_agnumber_t agno;
2055 xfs_agino_t agino;
2056 xfs_agino_t next_agino;
2057 xfs_buf_t *last_ibp;
2058 xfs_dinode_t *last_dip = NULL;
2059 short bucket_index;
2060 int offset, last_offset = 0;
2061 int error;
2062
2063 mp = tp->t_mountp;
2064 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2065
2066
2067
2068
2069
2070 error = xfs_read_agi(mp, tp, agno, &agibp);
2071 if (error)
2072 return error;
2073
2074 agi = XFS_BUF_TO_AGI(agibp);
2075
2076
2077
2078
2079
2080 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2081 if (!xfs_verify_agino(mp, agno, agino))
2082 return -EFSCORRUPTED;
2083 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2084 if (!xfs_verify_agino(mp, agno,
2085 be32_to_cpu(agi->agi_unlinked[bucket_index]))) {
2086 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2087 agi, sizeof(*agi));
2088 return -EFSCORRUPTED;
2089 }
2090
2091 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2092
2093
2094
2095
2096
2097
2098
2099 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2100 0, 0);
2101 if (error) {
2102 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2103 __func__, error);
2104 return error;
2105 }
2106 next_agino = be32_to_cpu(dip->di_next_unlinked);
2107 ASSERT(next_agino != 0);
2108 if (next_agino != NULLAGINO) {
2109 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2110 offset = ip->i_imap.im_boffset +
2111 offsetof(xfs_dinode_t, di_next_unlinked);
2112
2113
2114 xfs_dinode_calc_crc(mp, dip);
2115
2116 xfs_trans_inode_buf(tp, ibp);
2117 xfs_trans_log_buf(tp, ibp, offset,
2118 (offset + sizeof(xfs_agino_t) - 1));
2119 xfs_inobp_check(mp, ibp);
2120 } else {
2121 xfs_trans_brelse(tp, ibp);
2122 }
2123
2124
2125
2126 ASSERT(next_agino != 0);
2127 ASSERT(next_agino != agino);
2128 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2129 offset = offsetof(xfs_agi_t, agi_unlinked) +
2130 (sizeof(xfs_agino_t) * bucket_index);
2131 xfs_trans_log_buf(tp, agibp, offset,
2132 (offset + sizeof(xfs_agino_t) - 1));
2133 } else {
2134
2135
2136
2137 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2138 last_ibp = NULL;
2139 while (next_agino != agino) {
2140 struct xfs_imap imap;
2141
2142 if (last_ibp)
2143 xfs_trans_brelse(tp, last_ibp);
2144
2145 imap.im_blkno = 0;
2146 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2147
2148 error = xfs_imap(mp, tp, next_ino, &imap, 0);
2149 if (error) {
2150 xfs_warn(mp,
2151 "%s: xfs_imap returned error %d.",
2152 __func__, error);
2153 return error;
2154 }
2155
2156 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
2157 &last_ibp, 0, 0);
2158 if (error) {
2159 xfs_warn(mp,
2160 "%s: xfs_imap_to_bp returned error %d.",
2161 __func__, error);
2162 return error;
2163 }
2164
2165 last_offset = imap.im_boffset;
2166 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
2167 if (!xfs_verify_agino(mp, agno, next_agino)) {
2168 XFS_CORRUPTION_ERROR(__func__,
2169 XFS_ERRLEVEL_LOW, mp,
2170 last_dip, sizeof(*last_dip));
2171 return -EFSCORRUPTED;
2172 }
2173 }
2174
2175
2176
2177
2178
2179 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2180 0, 0);
2181 if (error) {
2182 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
2183 __func__, error);
2184 return error;
2185 }
2186 next_agino = be32_to_cpu(dip->di_next_unlinked);
2187 ASSERT(next_agino != 0);
2188 ASSERT(next_agino != agino);
2189 if (next_agino != NULLAGINO) {
2190 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2191 offset = ip->i_imap.im_boffset +
2192 offsetof(xfs_dinode_t, di_next_unlinked);
2193
2194
2195 xfs_dinode_calc_crc(mp, dip);
2196
2197 xfs_trans_inode_buf(tp, ibp);
2198 xfs_trans_log_buf(tp, ibp, offset,
2199 (offset + sizeof(xfs_agino_t) - 1));
2200 xfs_inobp_check(mp, ibp);
2201 } else {
2202 xfs_trans_brelse(tp, ibp);
2203 }
2204
2205
2206
2207 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2208 ASSERT(next_agino != 0);
2209 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2210
2211
2212 xfs_dinode_calc_crc(mp, last_dip);
2213
2214 xfs_trans_inode_buf(tp, last_ibp);
2215 xfs_trans_log_buf(tp, last_ibp, offset,
2216 (offset + sizeof(xfs_agino_t) - 1));
2217 xfs_inobp_check(mp, last_ibp);
2218 }
2219 return 0;
2220}
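
/*
 * An inode chunk has just been freed, so stale everything that covers it:
 * grab each cluster buffer, mark any attached inode log items stale, flag
 * the in-core inodes XFS_ISTALE and invalidate the buffers in this
 * transaction so nothing is written back over freed space.
 */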
2227STATIC int
2228xfs_ifree_cluster(
2229 xfs_inode_t *free_ip,
2230 xfs_trans_t *tp,
2231 struct xfs_icluster *xic)
2232{
2233 xfs_mount_t *mp = free_ip->i_mount;
2234 int blks_per_cluster;
2235 int inodes_per_cluster;
2236 int nbufs;
2237 int i, j;
2238 int ioffset;
2239 xfs_daddr_t blkno;
2240 xfs_buf_t *bp;
2241 xfs_inode_t *ip;
2242 xfs_inode_log_item_t *iip;
2243 struct xfs_log_item *lip;
2244 struct xfs_perag *pag;
2245 xfs_ino_t inum;
2246
2247 inum = xic->first_ino;
2248 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2249 blks_per_cluster = xfs_icluster_size_fsb(mp);
2250 inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
2251 nbufs = mp->m_ialloc_blks / blks_per_cluster;
2252
2253 for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
2254
2255
2256
2257
2258
2259 ioffset = inum - xic->first_ino;
2260 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2261 ASSERT(ioffset % inodes_per_cluster == 0);
2262 continue;
2263 }
2264
2265 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2266 XFS_INO_TO_AGBNO(mp, inum));
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2277 mp->m_bsize * blks_per_cluster,
2278 XBF_UNMAPPED);
2279
2280 if (!bp)
2281 return -ENOMEM;
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292 bp->b_ops = &xfs_inode_buf_ops;
2293
2294
2295
2296
2297
2298
2299
2300
2301 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
2302 if (lip->li_type == XFS_LI_INODE) {
2303 iip = (xfs_inode_log_item_t *)lip;
2304 ASSERT(iip->ili_logged == 1);
2305 lip->li_cb = xfs_istale_done;
2306 xfs_trans_ail_copy_lsn(mp->m_ail,
2307 &iip->ili_flush_lsn,
2308 &iip->ili_item.li_lsn);
2309 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2310 }
2311 }
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324 for (i = 0; i < inodes_per_cluster; i++) {
2325retry:
2326 rcu_read_lock();
2327 ip = radix_tree_lookup(&pag->pag_ici_root,
2328 XFS_INO_TO_AGINO(mp, (inum + i)));
2329
2330
2331 if (!ip) {
2332 rcu_read_unlock();
2333 continue;
2334 }
2335
2336
2337
2338
2339
2340
2341
2342
2343 spin_lock(&ip->i_flags_lock);
2344 if (ip->i_ino != inum + i ||
2345 __xfs_iflags_test(ip, XFS_ISTALE)) {
2346 spin_unlock(&ip->i_flags_lock);
2347 rcu_read_unlock();
2348 continue;
2349 }
2350 spin_unlock(&ip->i_flags_lock);
2351
2352
2353
2354
2355
2356
2357
2358
2359 if (ip != free_ip) {
2360 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2361 rcu_read_unlock();
2362 delay(1);
2363 goto retry;
2364 }
2365
2366
2367
2368
2369
2370
2371
2372
2373 if (ip->i_ino != inum + i) {
2374 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2375 rcu_read_unlock();
2376 continue;
2377 }
2378 }
2379 rcu_read_unlock();
2380
2381 xfs_iflock(ip);
2382 xfs_iflags_set(ip, XFS_ISTALE);
2383
2384
2385
2386
2387
2388 iip = ip->i_itemp;
2389 if (!iip || xfs_inode_clean(ip)) {
2390 ASSERT(ip != free_ip);
2391 xfs_ifunlock(ip);
2392 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2393 continue;
2394 }
2395
2396 iip->ili_last_fields = iip->ili_fields;
2397 iip->ili_fields = 0;
2398 iip->ili_fsync_fields = 0;
2399 iip->ili_logged = 1;
2400 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2401 &iip->ili_item.li_lsn);
2402
2403 xfs_buf_attach_iodone(bp, xfs_istale_done,
2404 &iip->ili_item);
2405
2406 if (ip != free_ip)
2407 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2408 }
2409
2410 xfs_trans_stale_inode_buf(tp, bp);
2411 xfs_trans_binval(tp, bp);
2412 }
2413
2414 xfs_perag_put(pag);
2415 return 0;
2416}
2417
2418
2419
2420
2421
2422static inline void
2423xfs_ifree_local_data(
2424 struct xfs_inode *ip,
2425 int whichfork)
2426{
2427 struct xfs_ifork *ifp;
2428
2429 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
2430 return;
2431
2432 ifp = XFS_IFORK_PTR(ip, whichfork);
2433 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2434}
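
/*
 * Free the on-disk inode: remove it from the unlinked list, return it to the
 * inode btrees via xfs_difree(), zero the in-core fields that describe it
 * and bump the generation number.  If this emptied an inode chunk, stale the
 * cluster buffers as well.
 */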
2446int
2447xfs_ifree(
2448 xfs_trans_t *tp,
2449 xfs_inode_t *ip,
2450 struct xfs_defer_ops *dfops)
2451{
2452 int error;
2453 struct xfs_icluster xic = { 0 };
2454
2455 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2456 ASSERT(VFS_I(ip)->i_nlink == 0);
2457 ASSERT(ip->i_d.di_nextents == 0);
2458 ASSERT(ip->i_d.di_anextents == 0);
2459 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2460 ASSERT(ip->i_d.di_nblocks == 0);
2461
2462
2463
2464
2465 error = xfs_iunlink_remove(tp, ip);
2466 if (error)
2467 return error;
2468
2469 error = xfs_difree(tp, ip->i_ino, dfops, &xic);
2470 if (error)
2471 return error;
2472
2473 xfs_ifree_local_data(ip, XFS_DATA_FORK);
2474 xfs_ifree_local_data(ip, XFS_ATTR_FORK);
2475
2476 VFS_I(ip)->i_mode = 0;
2477 ip->i_d.di_flags = 0;
2478 ip->i_d.di_flags2 = 0;
2479 ip->i_d.di_dmevmask = 0;
2480 ip->i_d.di_forkoff = 0;
2481 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2482 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2483
2484
2485 ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
2486
2487
2488
2489
2490
2491 VFS_I(ip)->i_generation++;
2492 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2493
2494 if (xic.deleted)
2495 error = xfs_ifree_cluster(ip, tp, &xic);
2496
2497 return error;
2498}
2499
2500
2501
2502
2503
2504
2505static void
2506xfs_iunpin(
2507 struct xfs_inode *ip)
2508{
2509 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2510
2511 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2512
2513
2514 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2515
2516}
2517
2518static void
2519__xfs_iunpin_wait(
2520 struct xfs_inode *ip)
2521{
2522 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2523 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2524
2525 xfs_iunpin(ip);
2526
2527 do {
2528 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2529 if (xfs_ipincount(ip))
2530 io_schedule();
2531 } while (xfs_ipincount(ip));
2532 finish_wait(wq, &wait.wq_entry);
2533}
2534
2535void
2536xfs_iunpin_wait(
2537 struct xfs_inode *ip)
2538{
2539 if (xfs_ipincount(ip))
2540 __xfs_iunpin_wait(ip);
2541}
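
/*
 * Remove the directory entry name from dp and drop the link count(s) on ip.
 * A directory victim must be empty with a link count of exactly 2; removing
 * it also drops the parent's link count for the lost ".." entry.
 */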
2570int
2571xfs_remove(
2572 xfs_inode_t *dp,
2573 struct xfs_name *name,
2574 xfs_inode_t *ip)
2575{
2576 xfs_mount_t *mp = dp->i_mount;
2577 xfs_trans_t *tp = NULL;
2578 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2579 int error = 0;
2580 struct xfs_defer_ops dfops;
2581 xfs_fsblock_t first_block;
2582 uint resblks;
2583
2584 trace_xfs_remove(dp, name);
2585
2586 if (XFS_FORCED_SHUTDOWN(mp))
2587 return -EIO;
2588
2589 error = xfs_qm_dqattach(dp);
2590 if (error)
2591 goto std_return;
2592
2593 error = xfs_qm_dqattach(ip);
2594 if (error)
2595 goto std_return;
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606 resblks = XFS_REMOVE_SPACE_RES(mp);
2607 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2608 if (error == -ENOSPC) {
2609 resblks = 0;
2610 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2611 &tp);
2612 }
2613 if (error) {
2614 ASSERT(error != -ENOSPC);
2615 goto std_return;
2616 }
2617
2618 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2619
2620 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2621 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2622
2623
2624
2625
2626 if (is_dir) {
2627 ASSERT(VFS_I(ip)->i_nlink >= 2);
2628 if (VFS_I(ip)->i_nlink != 2) {
2629 error = -ENOTEMPTY;
2630 goto out_trans_cancel;
2631 }
2632 if (!xfs_dir_isempty(ip)) {
2633 error = -ENOTEMPTY;
2634 goto out_trans_cancel;
2635 }
2636
2637
2638 error = xfs_droplink(tp, dp);
2639 if (error)
2640 goto out_trans_cancel;
2641
2642
2643 error = xfs_droplink(tp, ip);
2644 if (error)
2645 goto out_trans_cancel;
2646 } else {
2647
2648
2649
2650
2651
2652 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2653 }
2654 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2655
2656
2657 error = xfs_droplink(tp, ip);
2658 if (error)
2659 goto out_trans_cancel;
2660
2661 xfs_defer_init(&dfops, &first_block);
2662 tp->t_agfl_dfops = &dfops;
2663 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2664 &first_block, &dfops, resblks);
2665 if (error) {
2666 ASSERT(error != -ENOENT);
2667 goto out_bmap_cancel;
2668 }
2669
2670
2671
2672
2673
2674
2675 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2676 xfs_trans_set_sync(tp);
2677
2678 error = xfs_defer_finish(&tp, &dfops);
2679 if (error)
2680 goto out_bmap_cancel;
2681
2682 error = xfs_trans_commit(tp);
2683 if (error)
2684 goto std_return;
2685
2686 if (is_dir && xfs_inode_is_filestream(ip))
2687 xfs_filestream_deassociate(ip);
2688
2689 return 0;
2690
2691 out_bmap_cancel:
2692 xfs_defer_cancel(&dfops);
2693 out_trans_cancel:
2694 xfs_trans_cancel(tp);
2695 std_return:
2696 return error;
2697}
2698
2699
2700
2701
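
/*
 * __XFS_SORT_INODES is the maximum number of inodes a rename can involve.
 * xfs_sort_for_rename() gathers the non-NULL inodes and sorts them into
 * ascending inode number order so they can be locked without deadlocking.
 */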
2702#define __XFS_SORT_INODES 5
2703STATIC void
2704xfs_sort_for_rename(
2705 struct xfs_inode *dp1,
2706 struct xfs_inode *dp2,
2707 struct xfs_inode *ip1,
2708 struct xfs_inode *ip2,
2709 struct xfs_inode *wip,
2710 struct xfs_inode **i_tab,
2711 int *num_inodes)
2712{
2713 int i, j;
2714
2715 ASSERT(*num_inodes == __XFS_SORT_INODES);
2716 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2717
2718
2719
2720
2721
2722
2723
2724
2725 i = 0;
2726 i_tab[i++] = dp1;
2727 i_tab[i++] = dp2;
2728 i_tab[i++] = ip1;
2729 if (ip2)
2730 i_tab[i++] = ip2;
2731 if (wip)
2732 i_tab[i++] = wip;
2733 *num_inodes = i;
2734
2735
2736
2737
2738
2739 for (i = 0; i < *num_inodes; i++) {
2740 for (j = 1; j < *num_inodes; j++) {
2741 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2742 struct xfs_inode *temp = i_tab[j];
2743 i_tab[j] = i_tab[j-1];
2744 i_tab[j-1] = temp;
2745 }
2746 }
2747 }
2748}
2749
2750static int
2751xfs_finish_rename(
2752 struct xfs_trans *tp,
2753 struct xfs_defer_ops *dfops)
2754{
2755 int error;
2756
2757
2758
2759
2760
2761 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2762 xfs_trans_set_sync(tp);
2763
2764 error = xfs_defer_finish(&tp, dfops);
2765 if (error) {
2766 xfs_defer_cancel(dfops);
2767 xfs_trans_cancel(tp);
2768 return error;
2769 }
2770
2771 return xfs_trans_commit(tp);
2772}
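
/*
 * RENAME_EXCHANGE: atomically swap the directory entries for ip1 and ip2,
 * fixing up ".." entries and parent link counts when directories move
 * between parents.
 */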
2779STATIC int
2780xfs_cross_rename(
2781 struct xfs_trans *tp,
2782 struct xfs_inode *dp1,
2783 struct xfs_name *name1,
2784 struct xfs_inode *ip1,
2785 struct xfs_inode *dp2,
2786 struct xfs_name *name2,
2787 struct xfs_inode *ip2,
2788 struct xfs_defer_ops *dfops,
2789 xfs_fsblock_t *first_block,
2790 int spaceres)
2791{
2792 int error = 0;
2793 int ip1_flags = 0;
2794 int ip2_flags = 0;
2795 int dp2_flags = 0;
2796
2797
2798 error = xfs_dir_replace(tp, dp1, name1,
2799 ip2->i_ino,
2800 first_block, dfops, spaceres);
2801 if (error)
2802 goto out_trans_abort;
2803
2804
2805 error = xfs_dir_replace(tp, dp2, name2,
2806 ip1->i_ino,
2807 first_block, dfops, spaceres);
2808 if (error)
2809 goto out_trans_abort;
2810
2811
2812
2813
2814
2815
2816 if (dp1 != dp2) {
2817 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2818
2819 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2820 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2821 dp1->i_ino, first_block,
2822 dfops, spaceres);
2823 if (error)
2824 goto out_trans_abort;
2825
2826
2827 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2828 error = xfs_droplink(tp, dp2);
2829 if (error)
2830 goto out_trans_abort;
2831 error = xfs_bumplink(tp, dp1);
2832 if (error)
2833 goto out_trans_abort;
2834 }
2835
2836
2837
2838
2839
2840
2841
2842 ip1_flags |= XFS_ICHGTIME_CHG;
2843 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2844 }
2845
2846 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2847 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2848 dp2->i_ino, first_block,
2849 dfops, spaceres);
2850 if (error)
2851 goto out_trans_abort;
2852
2853
2854 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2855 error = xfs_droplink(tp, dp1);
2856 if (error)
2857 goto out_trans_abort;
2858 error = xfs_bumplink(tp, dp2);
2859 if (error)
2860 goto out_trans_abort;
2861 }
2862
			/*
			 * Although ip2 isn't changed here, userspace needs
			 * to be warned about the change, so that applications
			 * relying on it (like backup ones) will properly
			 * notice the change.
			 */
2869 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2870 ip2_flags |= XFS_ICHGTIME_CHG;
2871 }
2872 }
2873
2874 if (ip1_flags) {
2875 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2876 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2877 }
2878 if (ip2_flags) {
2879 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2880 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2881 }
2882 if (dp2_flags) {
2883 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2884 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2885 }
2886 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2887 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2888 return xfs_finish_rename(tp, dfops);
2889
2890out_trans_abort:
2891 xfs_defer_cancel(dfops);
2892 xfs_trans_cancel(tp);
2893 return error;
2894}
2895
/*
 * xfs_rename_alloc_whiteout()
 *
 * Return a referenced, unlinked, unlocked inode that can be used as a
 * whiteout in a rename transaction.  We use a tmpfile inode here so that
 * if we crash between allocating the whiteout inode and linking it into
 * the rename transaction, recovery will free the whiteout inode and it
 * will be reclaimed by the filesystem.
 */
2904static int
2905xfs_rename_alloc_whiteout(
2906 struct xfs_inode *dp,
2907 struct xfs_inode **wip)
2908{
2909 struct xfs_inode *tmpfile;
2910 int error;
2911
2912 error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
2913 if (error)
2914 return error;
2915
	/*
	 * Prepare the tmpfile inode as if it were created through the VFS.
	 * Otherwise, the link increment paths will complain about nlink 0->1.
	 * Drop the link count as done by d_tmpfile(), complete the inode
	 * setup and flag it as linkable.
	 */
2922 drop_nlink(VFS_I(tmpfile));
2923 xfs_setup_iops(tmpfile);
2924 xfs_finish_inode_setup(tmpfile);
2925 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2926
2927 *wip = tmpfile;
2928 return 0;
2929}
2930
/*
 * xfs_rename
 */
2934int
2935xfs_rename(
2936 struct xfs_inode *src_dp,
2937 struct xfs_name *src_name,
2938 struct xfs_inode *src_ip,
2939 struct xfs_inode *target_dp,
2940 struct xfs_name *target_name,
2941 struct xfs_inode *target_ip,
2942 unsigned int flags)
2943{
2944 struct xfs_mount *mp = src_dp->i_mount;
2945 struct xfs_trans *tp;
2946 struct xfs_defer_ops dfops;
2947 xfs_fsblock_t first_block;
2948 struct xfs_inode *wip = NULL;
2949 struct xfs_inode *inodes[__XFS_SORT_INODES];
2950 int num_inodes = __XFS_SORT_INODES;
2951 bool new_parent = (src_dp != target_dp);
2952 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2953 int spaceres;
2954 int error;
2955
2956 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2957
2958 if ((flags & RENAME_EXCHANGE) && !target_ip)
2959 return -EINVAL;
2960
	/*
	 * If we are doing a whiteout operation, allocate the whiteout inode
	 * we will be placing at the target and ensure the type is set
	 * appropriately.
	 */
2966 if (flags & RENAME_WHITEOUT) {
2967 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
2968 error = xfs_rename_alloc_whiteout(target_dp, &wip);
2969 if (error)
2970 return error;
2971
		/* setup target dirent info as whiteout */
2973 src_name->type = XFS_DIR3_FT_CHRDEV;
2974 }
2975
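	/*
	 * Sort the participating inodes by inode number so that their locks
	 * are always taken in a consistent order below.
	 */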
2976 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2977 inodes, &num_inodes);
2978
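	/*
	 * Reserve enough blocks for the worst-case directory insert.  If that
	 * fails with ENOSPC, retry without a block reservation; the rename
	 * can still succeed if no new directory blocks need to be allocated.
	 */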
2979 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2980 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2981 if (error == -ENOSPC) {
2982 spaceres = 0;
2983 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2984 &tp);
2985 }
2986 if (error)
2987 goto out_release_wip;
2988
	/*
	 * Attach the dquots to the inodes
	 */
2992 error = xfs_qm_vop_rename_dqattach(inodes);
2993 if (error)
2994 goto out_trans_cancel;
2995
	/*
	 * Lock all the participating inodes.  Depending upon whether
	 * the target_name exists in the target directory, and whether
	 * the target directory is the same as the source directory,
	 * we may lock anywhere from two to five inodes here.
	 */
3002 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3003
	/*
	 * Join all the inodes to the transaction.  From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.
	 */
3009 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3010 if (new_parent)
3011 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3012 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3013 if (target_ip)
3014 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3015 if (wip)
3016 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3017
	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
3023 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3024 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
3025 error = -EXDEV;
3026 goto out_trans_cancel;
3027 }
3028
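	/*
	 * Initialise the deferred operations list and the first-block cursor,
	 * and make the dfops visible to the transaction so that AGFL block
	 * frees triggered by the directory updates below can be deferred.
	 */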
3029 xfs_defer_init(&dfops, &first_block);
3030 tp->t_agfl_dfops = &dfops;
3031
	/* RENAME_EXCHANGE is unique from here on. */
3033 if (flags & RENAME_EXCHANGE)
3034 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3035 target_dp, target_name, target_ip,
3036 &dfops, &first_block, spaceres);
3037
	/*
	 * Set up the target.
	 */
3041 if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
3046 if (!spaceres) {
3047 error = xfs_dir_canenter(tp, target_dp, target_name);
3048 if (error)
3049 goto out_trans_cancel;
3050 }
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
3056 error = xfs_dir_createname(tp, target_dp, target_name,
3057 src_ip->i_ino, &first_block,
3058 &dfops, spaceres);
3059 if (error)
3060 goto out_bmap_cancel;
3061
3062 xfs_trans_ichgtime(tp, target_dp,
3063 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3064
3065 if (new_parent && src_is_directory) {
3066 error = xfs_bumplink(tp, target_dp);
3067 if (error)
3068 goto out_bmap_cancel;
3069 }
3070 } else {
		/*
		 * If the target exists and is a directory, it must be
		 * empty (apart from "." and "..") before we can replace
		 * its entry.
		 */
3076 if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
			/*
			 * Make sure target dir is empty.
			 */
3080 if (!(xfs_dir_isempty(target_ip)) ||
3081 (VFS_I(target_ip)->i_nlink > 2)) {
3082 error = -EEXIST;
3083 goto out_trans_cancel;
3084 }
3085 }
3086
		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
3096 error = xfs_dir_replace(tp, target_dp, target_name,
3097 src_ip->i_ino,
3098 &first_block, &dfops, spaceres);
3099 if (error)
3100 goto out_bmap_cancel;
3101
3102 xfs_trans_ichgtime(tp, target_dp,
3103 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3104
		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
3109 error = xfs_droplink(tp, target_ip);
3110 if (error)
3111 goto out_bmap_cancel;
3112
3113 if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
3117 error = xfs_droplink(tp, target_ip);
3118 if (error)
3119 goto out_bmap_cancel;
3120 }
3121 }
3122
	/*
	 * Remove the source.  If the source is a directory that is moving
	 * to a new parent, its ".." entry must be updated first.
	 */
3126 if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
3131 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3132 target_dp->i_ino,
3133 &first_block, &dfops, spaceres);
3134 ASSERT(error != -EEXIST);
3135 if (error)
3136 goto out_bmap_cancel;
3137 }
3138
	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
3146 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3147 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3148
	/*
	 * Adjust the link count on src_dp.  This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
3154 if (src_is_directory && (new_parent || target_ip != NULL)) {
		/*
		 * Decrement link count on src_dp since the entry that's
		 * moved no longer points to it.
		 */
3160 error = xfs_droplink(tp, src_dp);
3161 if (error)
3162 goto out_bmap_cancel;
3163 }
3164
	/*
	 * For whiteouts, we only need to update the source dirent with the
	 * inode number of the whiteout inode rather than removing it
	 * altogether.
	 */
3170 if (wip) {
3171 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3172 &first_block, &dfops, spaceres);
3173 } else
3174 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3175 &first_block, &dfops, spaceres);
3176 if (error)
3177 goto out_bmap_cancel;
3178
	/*
	 * For whiteouts, we need to bump the link count on the whiteout
	 * inode.  This means that failures all the way up to this point
	 * leave the inode on the unlinked list and so cleanup is a simple
	 * matter of dropping the remaining reference to it.  If we fail
	 * here after bumping the link count, we're shutting down the
	 * filesystem so we'll never see the intermediate state on disk.
	 */
3187 if (wip) {
3188 ASSERT(VFS_I(wip)->i_nlink == 0);
3189 error = xfs_bumplink(tp, wip);
3190 if (error)
3191 goto out_bmap_cancel;
3192 error = xfs_iunlink_remove(tp, wip);
3193 if (error)
3194 goto out_bmap_cancel;
3195 xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
3196
		/*
		 * Now we have a real link, clear the "I'm a tmpfile" state
		 * flag from the inode so it doesn't accidentally get misused
		 * in future.
		 */
3202 VFS_I(wip)->i_state &= ~I_LINKABLE;
3203 }
3204
3205 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3206 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3207 if (new_parent)
3208 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3209
3210 error = xfs_finish_rename(tp, &dfops);
3211 if (wip)
3212 IRELE(wip);
3213 return error;
3214
3215out_bmap_cancel:
3216 xfs_defer_cancel(&dfops);
3217out_trans_cancel:
3218 xfs_trans_cancel(tp);
3219out_release_wip:
3220 if (wip)
3221 IRELE(wip);
3222 return error;
3223}
3224
3225STATIC int
3226xfs_iflush_cluster(
3227 struct xfs_inode *ip,
3228 struct xfs_buf *bp)
3229{
3230 struct xfs_mount *mp = ip->i_mount;
3231 struct xfs_perag *pag;
3232 unsigned long first_index, mask;
3233 unsigned long inodes_per_cluster;
3234 int cilist_size;
3235 struct xfs_inode **cilist;
3236 struct xfs_inode *cip;
3237 int nr_found;
3238 int clcount = 0;
3239 int i;
3240
3241 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3242
3243 inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
3244 cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3245 cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3246 if (!cilist)
3247 goto out_put;
3248
3249 mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
3250 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3251 rcu_read_lock();
3252
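	/*
	 * The in-core inode radix tree is walked under the RCU read lock;
	 * any inode found must be revalidated under its i_flags_lock before
	 * it is used.
	 */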
3253 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
3254 first_index, inodes_per_cluster);
3255 if (nr_found == 0)
3256 goto out_free;
3257
3258 for (i = 0; i < nr_found; i++) {
3259 cip = cilist[i];
3260 if (cip == ip)
3261 continue;
3262
		/*
		 * Because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here.  Skip it if it is not valid or the wrong inode.
		 */
3269 spin_lock(&cip->i_flags_lock);
3270 if (!cip->i_ino ||
3271 __xfs_iflags_test(cip, XFS_ISTALE)) {
3272 spin_unlock(&cip->i_flags_lock);
3273 continue;
3274 }
3275
		/*
		 * Once we fall off the end of the cluster, no point checking
		 * any more inodes in the list because they will also all be
		 * outside the cluster.
		 */
3281 if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3282 spin_unlock(&cip->i_flags_lock);
3283 break;
3284 }
3285 spin_unlock(&cip->i_flags_lock);
3286
		/*
		 * Do an unlocked check to see if the inode is dirty and is a
		 * candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
3292 if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
3293 continue;
3294
		/*
		 * Try to get the locks.  If any are unavailable or the inode
		 * is pinned, then this inode cannot be flushed and is skipped.
		 */
3300 if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
3301 continue;
3302 if (!xfs_iflock_nowait(cip)) {
3303 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3304 continue;
3305 }
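		/*
		 * Skip inodes that are still pinned in the log; they cannot
		 * be written back until the log covering them has been
		 * flushed to disk.
		 */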
3306 if (xfs_ipincount(cip)) {
3307 xfs_ifunlock(cip);
3308 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3309 continue;
3310 }
3311
		/*
		 * Check the inode number again, now that we hold the ilock
		 * and flush lock, in case the inode was freed and its slot
		 * reused while we only held the RCU read lock.  The earlier
		 * unlocked check is not sufficient on its own.
		 */
3319 if (!cip->i_ino) {
3320 xfs_ifunlock(cip);
3321 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3322 continue;
3323 }
3324
		/*
		 * Arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
3329 if (!xfs_inode_clean(cip)) {
3330 int error;
3331 error = xfs_iflush_int(cip, bp);
3332 if (error) {
3333 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3334 goto cluster_corrupt_out;
3335 }
3336 clcount++;
3337 } else {
3338 xfs_ifunlock(cip);
3339 }
3340 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3341 }
3342
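	/*
	 * If we flushed any other inodes as part of this cluster write,
	 * update the cluster flush statistics.
	 */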
3343 if (clcount) {
3344 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3345 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3346 }
3347
3348out_free:
3349 rcu_read_unlock();
3350 kmem_free(cilist);
3351out_put:
3352 xfs_perag_put(pag);
3353 return 0;
3354
3355
3356cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
3361 rcu_read_unlock();
3362 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3363
	/*
	 * We'll always have an inode attached to the buffer for completion
	 * processing by the time we are called from xfs_iflush(), so we
	 * always need to run IO completion processing to abort the inodes
	 * attached to the buffer.  Handle them the same way a shutdown
	 * during buffer submission would.
	 */
3371 ASSERT(bp->b_iodone);
3372 bp->b_flags &= ~XBF_DONE;
3373 xfs_buf_stale(bp);
3374 xfs_buf_ioerror(bp, -EIO);
3375 xfs_buf_ioend(bp);
3376
	/* abort the corrupt inode, as it was not attached to the buffer */
3378 xfs_iflush_abort(cip, false);
3379 kmem_free(cilist);
3380 xfs_perag_put(pag);
3381 return -EFSCORRUPTED;
3382}
3383
/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
3393int
3394xfs_iflush(
3395 struct xfs_inode *ip,
3396 struct xfs_buf **bpp)
3397{
3398 struct xfs_mount *mp = ip->i_mount;
3399 struct xfs_buf *bp = NULL;
3400 struct xfs_dinode *dip;
3401 int error;
3402
3403 XFS_STATS_INC(mp, xs_iflush_count);
3404
3405 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3406 ASSERT(xfs_isiflocked(ip));
3407 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3408 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3409
3410 *bpp = NULL;
3411
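	/*
	 * Wait for the inode to be unpinned, i.e. for any log writes that
	 * cover it to complete, before we try to write it back ourselves.
	 */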
3412 xfs_iunpin_wait(ip);
3413
	/*
	 * If the inode has been marked stale, the cluster buffer backing it
	 * has been (or is being) invalidated and there is nothing to write
	 * back.  Drop the flush lock and return success.  We have to check
	 * this after waiting for the inode to be unpinned so that it is safe
	 * to reclaim the stale inode after the flush call.
	 */
3422 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3423 xfs_ifunlock(ip);
3424 return 0;
3425 }
3426
	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly.  If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
3435 if (XFS_FORCED_SHUTDOWN(mp)) {
3436 error = -EIO;
3437 goto abort_out;
3438 }
3439
	/*
	 * Get the buffer containing the on-disk inode.  We are doing a
	 * try-lock operation here, so we may get an EAGAIN error.  In that
	 * case, we simply want to return with the inode still dirty.
	 *
	 * If we get any other error, we effectively have a corruption
	 * situation and we cannot flush the inode, so we treat it the same
	 * as failing xfs_iflush_int().
	 */
3449 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3450 0);
3451 if (error == -EAGAIN) {
3452 xfs_ifunlock(ip);
3453 return error;
3454 }
3455 if (error)
3456 goto corrupt_out;
3457
	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
3461 error = xfs_iflush_int(ip, bp);
3462 if (error)
3463 goto corrupt_out;
3464
	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
3469 if (xfs_buf_ispinned(bp))
3470 xfs_log_force(mp, 0);
3471
	/*
	 * Inode clustering: try to gather other inodes into this write.
	 *
	 * Note: any error during clustering will result in the filesystem
	 * being shut down and completion callbacks run on the cluster buffer.
	 * As we have already flushed and attached this inode to the buffer,
	 * it has already been aborted and released by xfs_iflush_cluster()
	 * and so we have no further error handling to do here.
	 */
3481 error = xfs_iflush_cluster(ip, bp);
3482 if (error)
3483 return error;
3484
3485 *bpp = bp;
3486 return 0;
3487
3488corrupt_out:
3489 if (bp)
3490 xfs_buf_relse(bp);
3491 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3492abort_out:
	/* abort the corrupt inode, as it was not attached to the buffer */
3494 xfs_iflush_abort(ip, false);
3495 return error;
3496}
3497
/*
 * If there are inline format data / attr forks attached to this inode,
 * make sure they're not corrupt.
 */
3502bool
3503xfs_inode_verify_forks(
3504 struct xfs_inode *ip)
3505{
3506 struct xfs_ifork *ifp;
3507 xfs_failaddr_t fa;
3508
3509 fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
3510 if (fa) {
3511 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
3512 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
3513 ifp->if_u1.if_data, ifp->if_bytes, fa);
3514 return false;
3515 }
3516
3517 fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
3518 if (fa) {
3519 ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
3520 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
3521 ifp ? ifp->if_u1.if_data : NULL,
3522 ifp ? ifp->if_bytes : 0, fa);
3523 return false;
3524 }
3525 return true;
3526}
3527
3528STATIC int
3529xfs_iflush_int(
3530 struct xfs_inode *ip,
3531 struct xfs_buf *bp)
3532{
3533 struct xfs_inode_log_item *iip = ip->i_itemp;
3534 struct xfs_dinode *dip;
3535 struct xfs_mount *mp = ip->i_mount;
3536
3537 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3538 ASSERT(xfs_isiflocked(ip));
3539 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3540 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3541 ASSERT(iip != NULL && iip->ili_fields != 0);
3542 ASSERT(ip->i_d.di_version > 1);
3543
	/* set *dip = inode's place in the buffer */
3545 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3546
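	/*
	 * Sanity-check the on-disk and in-core inode before flushing; if any
	 * of the checks below trips (or its matching error tag fires), we
	 * refuse to write the inode and report corruption.
	 */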
3547 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3548 mp, XFS_ERRTAG_IFLUSH_1)) {
3549 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3550 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3551 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3552 goto corrupt_out;
3553 }
3554 if (S_ISREG(VFS_I(ip)->i_mode)) {
3555 if (XFS_TEST_ERROR(
3556 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3557 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3558 mp, XFS_ERRTAG_IFLUSH_3)) {
3559 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3560 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
3561 __func__, ip->i_ino, ip);
3562 goto corrupt_out;
3563 }
3564 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3565 if (XFS_TEST_ERROR(
3566 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3567 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3568 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3569 mp, XFS_ERRTAG_IFLUSH_4)) {
3570 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3571 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
3572 __func__, ip->i_ino, ip);
3573 goto corrupt_out;
3574 }
3575 }
3576 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3577 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3578 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3579 "%s: detected corrupt incore inode %Lu, "
3580 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3581 __func__, ip->i_ino,
3582 ip->i_d.di_nextents + ip->i_d.di_anextents,
3583 ip->i_d.di_nblocks, ip);
3584 goto corrupt_out;
3585 }
3586 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3587 mp, XFS_ERRTAG_IFLUSH_6)) {
3588 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3589 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3590 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3591 goto corrupt_out;
3592 }
3593
	/*
	 * Inode item log recovery for v2 inodes is dependent on the
	 * di_flushiter count for correct sequencing.  We bump the flush
	 * iteration count so we can detect flushes which postdate a log
	 * record during recovery.  This is redundant as we now log every
	 * change and hence this can't happen, but we still need to do it
	 * to ensure backwards compatibility with old kernels that predate
	 * logging all inode changes.
	 */
3603 if (ip->i_d.di_version < 3)
3604 ip->i_d.di_flushiter++;
3605
	/* Check the inline fork data before we write out. */
3607 if (!xfs_inode_verify_forks(ip))
3608 goto corrupt_out;
3609
	/*
	 * Copy the dirty parts of the inode into the on-disk inode.  We
	 * always copy out the core of the inode, because if the inode is
	 * dirty at all the core must be.
	 */
3615 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3616
	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3618 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3619 ip->i_d.di_flushiter = 0;
3620
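	/* Copy the data fork, and the attr fork if present, into the buffer. */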
3621 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3622 if (XFS_IFORK_Q(ip))
3623 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3624 xfs_inobp_check(mp, bp);
3625
	/*
	 * We've recorded everything logged in the inode, so we'd like to
	 * clear the ili_fields bits so we don't log and flush things
	 * unnecessarily.  However, we can't stop logging all this information
	 * until the data we've copied into the disk buffer is written to
	 * disk.  If we did, we might overwrite the copy of the inode in the
	 * log with all the data after re-logging only part of it, and in the
	 * face of a crash we wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields
	 * field.  In the xfs_iflush_done() routine we clear ili_last_fields,
	 * since we know that the information those bits represent is
	 * permanently on disk.  As long as the flush completes before the
	 * inode is logged again, then both ili_fields and ili_last_fields
	 * will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits.  Set ili_logged so the
	 * flush done routine can tell whether or not to look in the AIL.
	 * Also, store the current LSN of the inode so that we can tell
	 * whether the item has moved in the AIL from xfs_iflush_done().  In
	 * order to read the lsn we need the AIL lock, because it is a 64 bit
	 * value that cannot be read atomically.
	 */
3651 iip->ili_last_fields = iip->ili_fields;
3652 iip->ili_fields = 0;
3653 iip->ili_fsync_fields = 0;
3654 iip->ili_logged = 1;
3655
3656 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3657 &iip->ili_item.li_lsn);
3658
	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer.  This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
3665 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3666
	/* generate the checksum. */
3668 xfs_dinode_calc_crc(mp, dip);
3669
3670 ASSERT(!list_empty(&bp->b_li_list));
3671 ASSERT(bp->b_iodone != NULL);
3672 return 0;
3673
3674corrupt_out:
3675 return -EFSCORRUPTED;
3676}
3677