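/*
 * iomap interface for XFS: translates file offsets into block mappings for
 * the generic iomap code, covering delayed allocation for buffered writes,
 * direct block allocation for direct I/O and DAX, conversion of delalloc
 * extents at writeback time, and unwritten extent conversion.
 */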
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

/* Round a file offset down to the filesystem's write I/O alignment. */
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)

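/*
 * Translate an XFS block mapping (struct xfs_bmbt_irec) into the generic
 * struct iomap understood by the iomap infrastructure.
 */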
void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}

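/*
 * Calculate the EOF allocation alignment in filesystem blocks: the stripe
 * unit or width for regular inodes (subject to the file size), rounded up to
 * the extent size hint when one is passed in.
 */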
xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has an extent size hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

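/*
 * Round *last_fsb up to the EOF alignment, but only if the rounded-up offset
 * still sits at or beyond the last allocated block of the data fork.
 */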
STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

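/*
 * Warn about a mapping that points at block zero of the data device, which
 * is never valid for a regular file, and return -EFSCORRUPTED.
 */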
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

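/*
 * Allocate the blocks backing a direct I/O or DAX write.  Called with the
 * ilock held shared; the lock is cycled to exclusive for the allocation
 * transaction and dropped before returning.  Non-DAX allocations are made as
 * unwritten (preallocated) extents, DAX allocations are zeroed and written.
 */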
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	struct xfs_defer_ops dfops;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this
		 * can call xfs_iomap_eof_align_last_fsb(), which walks the
		 * extent list while we only hold the ilock shared.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to
	 * do this outside the transaction context, but if we commit and then
	 * crash we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation.  Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten
	 * extent callback for DAX.  This also means that we need to be able
	 * to dip into the reserve block pool for bmbt block allocation if
	 * there is no space left but we need to do unwritten extent
	 * conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

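/*
 * Decide whether speculative preallocation should be throttled against this
 * quota type: the quota must be active, have a high watermark set, and the
 * current reservation plus this allocation must reach the low watermark.
 */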
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

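/*
 * Work out the throttling contribution of a single quota: overwrite *qblocks
 * and *qshift with this quota's values if they are more aggressive, and
 * clamp *qfreesp to the space remaining before the quota's high watermark.
 */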
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no
 * allocations past this one, then extend the allocation out to the file
 * system's write iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum
 * size at a single extent or less if the filesystem is near full.  The
 * closer the filesystem is to being full, the smaller the maximum
 * preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If there is an explicit default I/O size set, the file is smaller
	 * than the stripe unit, or there is no previous in-core extent ending
	 * at or beyond this offset, just use the static write size.
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial prealloc size from the preceding extent:
	 * double its length, unless that would overflow MAXEXTLEN, in which
	 * case fall back to the current offset converted to blocks.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc
	 * down to the nearest power of two value after throttling.  To
	 * prevent the round down from unconditionally reducing the maximum
	 * supported prealloc size, we round up first, apply appropriate
	 * throttling, round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value
	 * to throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space
	 * available in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined
	 * by the global low free space values and per-quota low free space
	 * values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is available,
	 * squash the prealloc hard.  This can happen if we have a large file
	 * on a small filesystem and the above lowspace thresholds are smaller
	 * than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

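/*
 * iomap_begin handler for buffered writes: find an existing mapping covering
 * the start of the write or reserve delayed allocation blocks for it,
 * including any speculative preallocation beyond EOF.
 */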
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fsblock_t		prealloc_blocks = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
	if (!eof && got.br_startoff <= offset_fsb) {
		if (xfs_is_reflink_inode(ip)) {
			bool		shared;

			end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
					maxbytes_fsb);
			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
			error = xfs_reflink_reserve_cow(ip, &got, &shared);
			if (error)
				goto out_unlock;
		}

		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * Cap the maximum length we map here to 1024 pages to keep the chunks
	 * of work done roughly symmetric with what writeback does per pass.
	 * Note that the value needs to stay below 32 bits wide until the
	 * lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
				&icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &got, &icur,
			eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHROUGH*/
	default:
		goto out_unlock;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags = IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * do is return a block mapped over the range passed in.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	int		whichfork,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	struct xfs_defer_ops	dfops;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		flags = XFS_BMAPI_DELALLOC;
	int		nres;

	if (whichfork == XFS_COW_FORK)
		flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the backing
		 * store for the file.  Do allocations in a loop until we get
		 * some space in the range we are interested in.  The other
		 * space that might have been reserved by the transaction is
		 * released when the transaction is committed.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and
			 * any indirect blocks when creating the delalloc
			 * extent, so there is no need to reserve space in
			 * this transaction again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * The in-core extents may have changed since the read
			 * call because we dropped the ilock for a while.  We
			 * must not allocate non-delalloc blocks here, so only
			 * ask for a single mapping and clamp the range to the
			 * larger of the in-core EOF and the last extent
			 * offset, in case a truncate or hole punch raced with
			 * us and removed part of the delalloc region.
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
						XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, flags, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that covers at
		 * least part of the callers request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the file,
		 * just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

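/*
 * Convert unwritten extents in the given byte range to written extents,
 * updating the on-disk file size as needed while the conversion proceeds.
 */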
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct xfs_defer_ops dfops;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents from
		 * unwritten to real.  Do allocations in a loop until we have
		 * covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that
		 * we complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get smaller,
			 * otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

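/*
 * Helpers for xfs_file_iomap_begin(): decide whether a mapping needs block
 * allocation, whether zeroing over it requires COW, and whether the lookup
 * must take the ilock exclusively.
 */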
static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

static inline bool needs_cow_for_zeroing(struct xfs_bmbt_irec *imap, int nimaps)
{
	return nimaps &&
		imap->br_startblock != HOLESTARTBLOCK &&
		imap->br_state != XFS_EXT_UNWRITTEN;
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return true;

	/*
	 * Extents not yet cached requires exclusive access, don't block.
	 * This is basically an opencoded xfs_ilock_data_map_shared() that
	 * also caters for the non-blocking case.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
	    !(ip->i_df.if_flags & XFS_IFEXTENTS))
		return true;
	return false;
}

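/*
 * Main iomap_begin handler for the data fork.  Plain buffered writes are
 * handed off to the delalloc path above; reflinked files get COW reservation
 * or allocation; direct I/O, DAX and extent-size-hint writes allocate real
 * blocks via xfs_iomap_write_direct() when the mapping needs them.
 */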
static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false, trimmed = false;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
	}

	if (need_excl_ilock(ip, flags))
		lockmode = XFS_ILOCK_EXCL;
	else
		lockmode = XFS_ILOCK_SHARED;

	if (flags & IOMAP_NOWAIT) {
		if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, lockmode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lockmode);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset > mp->m_super->s_maxbytes - length)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (flags & IOMAP_REPORT) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
				&trimmed);
		if (error)
			goto out_unlock;
	}

	if (xfs_is_reflink_inode(ip) &&
	    ((flags & IOMAP_WRITE) ||
	     ((flags & IOMAP_ZERO) && needs_cow_for_zeroing(&imap, nimaps)))) {
		if (flags & IOMAP_DIRECT) {
			/*
			 * A reflinked inode will result in CoW alloc.
			 * FIXME: It could still overwrite on unshared extents
			 * and not need allocation.
			 */
			if (flags & IOMAP_NOWAIT) {
				error = -EAGAIN;
				goto out_unlock;
			}

			error = xfs_reflink_allocate_cow(ip, &imap, &shared,
					&lockmode);
			if (error)
				goto out_unlock;
		} else {
			error = xfs_reflink_reserve_cow(ip, &imap, &shared);
			if (error)
				goto out_unlock;
		}

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * If nowait was asked for, bail here since we are about to
		 * allocate blocks.
		 */
		if (flags & IOMAP_NOWAIT) {
			error = -EAGAIN;
			goto out_unlock;
		}

		/*
		 * We cap the maximum length we map to a sane size to keep
		 * the chunks of work done here somewhat symmetric with the
		 * work writeback does.  This is a completely arbitrary number
		 * pulled out of thin air as a best guess for initial testing.
		 *
		 * Note that the value needs to stay below 32 bits wide until
		 * the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);

		/*
		 * xfs_iomap_write_direct() expects the shared lock.  It is
		 * unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
				nimaps);
		if (error)
			return error;

		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields
				& ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;

	xfs_bmbt_to_iomap(ip, iomap, &imap);

	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

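/*
 * Clean up after a buffered write: if this mapping reserved new delalloc
 * blocks and the write was short or failed, punch out the reservation beyond
 * the bytes actually written.
 */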
static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	struct iomap		*iomap)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	/*
	 * Behave as if the write failed if drop writes is enabled.  Set the
	 * NEW flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write.
	 * If nothing was written, round offset down to point at the first
	 * block in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls.  If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written, iomap);
	return 0;
}

const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

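/*
 * iomap_begin handler for the attribute fork, used to report attribute fork
 * extents (e.g. for FIEMAP).  No allocation is performed here.
 */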
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there are no attribute fork or extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};