/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
					 << mp->m_writeio_log)

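/*
 * Translate an xfs_bmbt_irec mapping into the generic struct iomap used by
 * the iomap infrastructure: holes and delalloc reservations get a null block
 * number, everything else gets a disk block number plus the mapped or
 * unwritten state.
 */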
void
xfs_bmbt_to_iomap(
	struct xfs_inode *ip,
	struct iomap *iomap,
	struct xfs_bmbt_irec *imap)
{
	struct xfs_mount *mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}

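/*
 * Work out the alignment (in filesystem blocks) to use for an allocation
 * that extends EOF: the stripe unit or stripe width for the data device,
 * rounded up to the extent size hint if one is in effect.
 */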
xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode *ip,
	xfs_extlen_t extsize)
{
	struct xfs_mount *mp = ip->i_mount;
	xfs_extlen_t align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

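/*
 * Round *last_fsb up to the EOF alignment, but only if the aligned offset is
 * still beyond the last allocated block in the data fork.
 */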
STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode *ip,
	xfs_extlen_t extsize,
	xfs_fileoff_t *last_fsb)
{
	xfs_extlen_t align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align);
		int eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

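/* Complain about a mapping that points at block zero and flag corruption. */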
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t *ip,
	xfs_bmbt_irec_t *imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

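/*
 * Allocate real blocks for a direct I/O (or DAX) write.  The caller holds the
 * ilock shared; it is dropped and retaken exclusively around the allocation
 * transaction, and the inode is unlocked again before we return.
 */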
int
xfs_iomap_write_direct(
	xfs_inode_t *ip,
	xfs_off_t offset,
	size_t count,
	xfs_bmbt_irec_t *imap,
	int nmaps)
{
	xfs_mount_t *mp = ip->i_mount;
	xfs_fileoff_t offset_fsb;
	xfs_fileoff_t last_fsb;
	xfs_filblks_t count_fsb, resaligned;
	xfs_fsblock_t firstfsb;
	xfs_extlen_t extsz;
	int nimaps;
	int quota_flag;
	int rt;
	xfs_trans_t *tp;
	struct xfs_defer_ops dfops;
	uint qblocks, resblks, resrtextents;
	int error;
	int lockmode;
	int bmapi_flags = XFS_BMAPI_PREALLOC;
	uint tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this
		 * can call xfs_iread_extents() and we only have the ilock
		 * shared.  This should be safe because the lock was held
		 * around a bmapi call in the caller and we only need it to
		 * access the in-core list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
				XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then
	 * crash we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation.  Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten
	 * extent callback for DAX.  This also means that we need to be able to
	 * dip into the reserve block pool for bmbt block allocation if there
	 * is no space left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

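/*
 * Decide whether speculative preallocation should be throttled against this
 * quota: the quota must be active and have a high watermark, and the new
 * allocation must push the reservation past the low watermark.
 */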
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

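/*
 * Work out how hard to throttle preallocation against this quota: squash it
 * completely at or above the high watermark, otherwise derive a throttle
 * shift from the per-quota low-space thresholds and report the remaining
 * quota space back to the caller.
 */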
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t *qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode *ip,
	loff_t offset,
	loff_t count,
	xfs_extnum_t idx)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec prev;
	int shift = 0;
	int64_t freesp;
	xfs_fsblock_t qblocks;
	int qshift = 0;
	xfs_fsblock_t alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit I/O size was set with the allocsize mount option,
	 * the file is still smaller than the stripe unit, or the extent
	 * preceding this write does not end right at the write offset, just
	 * use the default write I/O size as the preallocation.
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation.  We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size.  If the size of the extent is greater
	 * than half the maximum extent length, then use the current offset as
	 * the basis.  This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we don't use the full
	 * extent length because it rounds to a power of two below for the
	 * throttled allocation, which can be less than MAXEXTLEN.  Cap the
	 * starting value here; the explicit MAXEXTLEN clamp further down
	 * handles the case where the power-of-two rounding overshoots.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space
	 * available in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined
	 * by the global low free space values and per-quota low free space
	 * values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard.  This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

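/*
 * iomap_begin handler for buffered writes to regular (non-realtime, no extent
 * size hint) files: map an existing extent if one already covers the offset,
 * otherwise reserve delayed allocation blocks, with speculative EOF
 * preallocation applied to extending writes.
 */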
static int
xfs_file_iomap_begin_delay(
	struct inode *inode,
	loff_t offset,
	loff_t count,
	struct iomap *iomap)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t end_fsb;
	int error = 0, eof = 0;
	struct xfs_bmbt_irec got;
	xfs_extnum_t idx;
	xfs_fsblock_t prealloc_blocks = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
	if (!eof && got.br_startoff <= offset_fsb) {
		if (xfs_is_reflink_inode(ip)) {
			bool shared;

			end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
					maxbytes_fsb);
			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
			error = xfs_reflink_reserve_cow(ip, &got, &shared);
			if (error)
				goto out_unlock;
		}

		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
		if (prealloc_blocks) {
			xfs_extlen_t align;
			xfs_off_t end_offset;
			xfs_fileoff_t p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags = IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * do is make sure that extents up to offset are allocated ok.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t *ip,
	int whichfork,
	xfs_off_t offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t *mp = ip->i_mount;
	xfs_fileoff_t offset_fsb, last_block;
	xfs_fileoff_t end_fsb, map_start_fsb;
	xfs_fsblock_t first_block;
	struct xfs_defer_ops dfops;
	xfs_filblks_t count_fsb;
	xfs_trans_t *tp;
	int nimaps;
	int error = 0;
	int flags = XFS_BMAPI_DELALLOC;
	int nres;

	if (whichfork == XFS_COW_FORK)
		flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and any
			 * indirect blocks when creating the delalloc extent,
			 * there is no need to reserve space in this transaction
			 * again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while.  We have to be careful about truncates or
			 * hole punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked, so a truncate will block on them first.
			 *
			 * To guard against a racing truncate or hole punch,
			 * trim the range we allocate so that it does not go
			 * beyond the larger of the current EOF and the last
			 * block of the data fork, both recomputed now that we
			 * hold the ilock again.  If nothing is left to map,
			 * return -EAGAIN and let the caller retry.
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
						XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, flags, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

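/*
 * Convert unwritten extents in the given byte range to real, written extents,
 * looping one transaction per mapping and logging any needed inode size
 * update as we go.
 */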
int
xfs_iomap_write_unwritten(
	xfs_inode_t *ip,
	xfs_off_t offset,
	xfs_off_t count,
	bool update_isize)
{
	xfs_mount_t *mp = ip->i_mount;
	xfs_fileoff_t offset_fsb;
	xfs_filblks_t count_fsb;
	xfs_filblks_t numblks_fsb;
	xfs_fsblock_t firstfsb;
	int nimaps;
	xfs_trans_t *tp;
	xfs_bmbt_irec_t imap;
	struct xfs_defer_ops dfops;
	struct inode *inode = VFS_I(ip);
	xfs_fsize_t i_size;
	uint resblks;
	int error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have converted the range of extents.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that
		 * we complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

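/*
 * A write needs a real allocation if we got no mapping back, the mapping is a
 * hole or a delalloc reservation, or (for DAX) it is still unwritten.
 */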
static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return true;
	if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
		return true;
	return false;
}

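/*
 * iomap_begin handler for the data fork.  Non-DAX buffered writes without an
 * extent size hint are handed off to the delalloc path above; everything else
 * is mapped here, with real blocks allocated via xfs_iomap_write_direct()
 * when the write needs them.
 */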
static int
xfs_file_iomap_begin(
	struct inode *inode,
	loff_t offset,
	loff_t length,
	unsigned flags,
	struct iomap *iomap)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_bmbt_irec imap;
	xfs_fileoff_t offset_fsb, end_fsb;
	int nimaps = 1, error = 0;
	bool shared = false, trimmed = false;
	unsigned lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
	}

	if (need_excl_ilock(ip, flags)) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = -EAGAIN;
		goto out_unlock;
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (flags & IOMAP_REPORT) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
				&trimmed);
		if (error)
			goto out_unlock;
	}

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
		if (flags & IOMAP_DIRECT) {
			/*
			 * A reflinked inode will result in CoW alloc.
			 * FIXME: It could still overwrite on unshared extents
			 * and not need allocation.
			 */
			if (flags & IOMAP_NOWAIT) {
				error = -EAGAIN;
				goto out_unlock;
			}

			error = xfs_reflink_allocate_cow(ip, &imap, &shared,
					&lockmode);
			if (error)
				goto out_unlock;
		} else {
			error = xfs_reflink_reserve_cow(ip, &imap, &shared);
			if (error)
				goto out_unlock;
		}

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * If nowait is set bail since we are going to make
		 * allocations.
		 */
		if (flags & IOMAP_NOWAIT) {
			error = -EAGAIN;
			goto out_unlock;
		}

		/*
		 * We cap the maximum length we map here to keep the chunks of
		 * work done here somewhat symmetric with the work writeback
		 * does.  This is a completely arbitrary number pulled out of
		 * thin air as a best guess for initial testing.
		 *
		 * Note that the value needs to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);

		/*
		 * xfs_iomap_write_direct() expects the shared lock.  It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
				nimaps);
		if (error)
			return error;

		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	xfs_bmbt_to_iomap(ip, iomap, &imap);

	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

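/*
 * Punch out any delalloc blocks this write reserved (IOMAP_F_NEW) but did not
 * manage to write back, so a short or failed write does not leave stale
 * reservations behind.
 */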
static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode *ip,
	loff_t offset,
	loff_t length,
	ssize_t written,
	struct iomap *iomap)
{
	struct xfs_mount *mp = ip->i_mount;
	xfs_fileoff_t start_fsb;
	xfs_fileoff_t end_fsb;
	int error = 0;

	/*
	 * Behave as if the write failed if drop writes is enabled.  Set the
	 * NEW flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write.  If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls.  If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode *inode,
	loff_t offset,
	loff_t length,
	ssize_t written,
	unsigned flags,
	struct iomap *iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written, iomap);
	return 0;
}

const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

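/*
 * Read-only iomap_begin handler for the attribute fork; returns -ENOENT if
 * the inode has no attribute fork or no attribute extents.
 */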
static int
xfs_xattr_iomap_begin(
	struct inode *inode,
	loff_t offset,
	loff_t length,
	unsigned flags,
	struct iomap *iomap)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec imap;
	int nimaps = 1, error = 0;
	unsigned lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};