#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

static struct workqueue_struct *xfslogd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
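	/*
	 * Return true if the buffer has a virtually contiguous mapping.
	 *
	 * b_addr is NULL when the buffer is not mapped at all, and a
	 * single-page buffer uses the page address directly rather than
	 * vm_map_ram(), so both conditions are needed here.
	 */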
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

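/*
 * Mark a buffer stale.  XBF_STALE is set, the delwri queue flag is
 * cleared so delwri list submission skips the buffer, the LRU reference
 * count is zeroed and, if the buffer is still on the LRU, it is removed
 * and the hold owned by the LRU is dropped.  Callers must hold the
 * buffer lock.
 */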
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	bp->b_flags &= ~_XBF_DELWRI_Q;

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf	*bp,
	int		map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				 KM_NOFS);
	if (!bp->b_maps)
		return ENOMEM;
	return 0;
}

static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

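/*
 * Allocate an xfs_buf structure for the given target and block map and
 * initialise its state.  No backing pages are allocated here.
 */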
struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0);
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

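/*
 * Allocate the page pointer array for a buffer, using the inline array
 * for small buffers and a heap allocation otherwise.
 */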
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

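/*
 * Release the backing memory of a buffer and free the buffer structure
 * itself.  The buffer must already have been removed from the LRU, so
 * this is only called once the last reference has been dropped.
 */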
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

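/*
 * Allocate backing memory for a buffer.  Buffers smaller than a page are
 * allocated from the heap as long as they do not straddle a page
 * boundary; everything else gets individually allocated pages.
 */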
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

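/*
 * Map buffer memory into contiguous kernel address space, unless the
 * caller asked for an unmapped buffer or the buffer already fits within
 * a single page.
 */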
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned noio_flag;
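
		/*
		 * vm_map_ram() allocates its auxiliary structures with
		 * GFP_KERNEL, but this code may be running in a GFP_NOFS
		 * context, so wrap the mapping attempts in
		 * memalloc_noio_save()/restore() to stop memory reclaim
		 * from re-entering the filesystem and deadlocking.
		 */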
		noio_flag = memalloc_noio_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_noio_restore(noio_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

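/*
 * Finding and Reading Buffers
 *
 * Look up a buffer in the per-AG cache.  A matching buffer is returned
 * locked (or NULL if XBF_TRYLOCK was set and the lock is contended).
 * If no match is found and new_bp was supplied, new_bp is inserted into
 * the cache and returned instead.
 */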
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	size_t			numbytes;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	xfs_daddr_t		eofs;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
	numbytes = BBTOB(numblks);

	ASSERT(!(numbytes < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));

	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (blkno >= eofs) {
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
		WARN_ON(1);
		return NULL;
	}

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

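/*
 * Get a buffer for the given block map.  A cached buffer is returned if
 * one exists; otherwise a new buffer is allocated, given backing memory
 * and inserted into the cache.  The buffer is returned locked and, unless
 * XBF_UNMAPPED was requested, mapped into kernel address space.
 */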
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				 "%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	xfs_buf_iorequest(bp);
	if (flags & XBF_ASYNC)
		return 0;
	return xfs_buf_iowait(bp);
}

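/*
 * Read a buffer for the given block map.  If the buffer is not already
 * up to date, I/O is issued with the supplied verifier ops; an
 * asynchronous read request that is already satisfied releases the
 * buffer and returns NULL.
 */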
xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			xfs_buf_relse(bp);
			return NULL;
		} else {
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

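/*
 * Kick off an asynchronous read-ahead of the given range, unless the
 * backing device is already congested with read requests.
 */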
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
			 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

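/*
 * Read an uncached buffer from disk.  Allocates and returns a locked
 * buffer containing the disk contents, or NULL on failure.
 */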
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return NULL;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = daddr;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
		xfs_buf_relse(bp);
		return NULL;
	}
	xfs_buf_iorequest(bp);
	xfs_buf_iowait(bp);
	return bp;
}

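/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */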
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

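/*
 * Associate externally supplied memory with a buffer, building the page
 * list the I/O path expects from the caller's address range.
 */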
int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			 "%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

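/*
 * Increment the reference count on a buffer, to hold it across operations
 * that might otherwise release (and free) it.
 */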
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

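/*
 * Release a hold on a buffer.  When the last hold on a cached buffer
 * drops, the buffer is either added to the LRU for later reuse or, if it
 * is stale or its LRU reference has expired, removed from the cache and
 * freed.
 */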
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		spin_lock(&bp->b_lock);
		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
				bp->b_state &= ~XFS_BSTATE_DISPOSE;
				atomic_inc(&bp->b_hold);
			}
			spin_unlock(&bp->b_lock);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
			} else {
				ASSERT(list_empty(&bp->b_lru));
			}
			spin_unlock(&bp->b_lock);

			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}

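/*
 * Lock a buffer object if it is not already locked.  This only protects
 * the in-memory buffer state; it does not lock the underlying pages.
 */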
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

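/*
 * Lock a buffer object, sleeping until it becomes available.  If the
 * buffer is both pinned and stale, force the log first so the pin is
 * released and the lock does not have to wait behind outstanding log I/O
 * indefinitely.
 */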
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

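/*
 * Buffer I/O completion handling.  The completion work runs the read
 * verifier on successful reads, then either calls the buffer's b_iodone
 * handler, releases an async buffer, or wakes the synchronous waiter.
 */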
STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);
	bool			read = !!(bp->b_flags & XBF_READ);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
		bp->b_ops->verify_read(bp);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else {
		ASSERT(read && bp->b_ops);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioend(
	struct xfs_buf	*bp,
	int		schedule)
{
	bool		read = !!(bp->b_flags & XBF_READ);

	trace_xfs_buf_iodone(bp, _RET_IP_);

	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}

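/*
 * Fail a buffer without issuing any I/O: record an EIO error, clear the
 * read and done state, mark the buffer stale, and run I/O completion as
 * if the request had been attempted and failed.
 */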
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	xfs_buf_ioerror(bp, EIO);

	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

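/*
 * Fail a buffer as in xfs_bioerror(), but mark it done, clear any
 * b_iodone callback, and either complete the waiter directly (sync) or
 * release the buffer (async) instead of running normal I/O completion.
 */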
int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;

	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}

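/*
 * Submission wrapper that checks for a forced shutdown before issuing
 * I/O; if the filesystem is already shut down the buffer is errored out
 * instead of touching the device.
 */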
STATIC int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);

		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);

	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	if (!bp->b_error)
		xfs_buf_ioerror(bp, -error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

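/*
 * Issue the I/O for a single map of a buffer, splitting it into as many
 * bios as are needed to cover the pages backing that map.
 */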
static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		rw)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	total_nr_pages = bp->b_page_count;

	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		rw;
	int		offset;
	int		size;
	int		i;

	bp->b_error = 0;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;

		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	rw |= REQ_META;

	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;
	}
	blk_finish_plug(&plug);
}

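/*
 * Start I/O on a buffer.  The initial b_io_remaining reference and the
 * extra hold taken here keep the buffer from being completed or freed
 * while bios are still being submitted.
 */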
void
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);

	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	_xfs_buf_ioend(bp, bp->b_error ? 0 : 1);

	xfs_buf_rele(bp);
}

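/*
 * Wait for I/O completion of a synchronously submitted buffer and return
 * its error state.  If an error was already recorded at submission time,
 * there is nothing to wait for.
 */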
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	if (!bp->b_error)
		wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

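/*
 * Move data into or out of a buffer, or zero a range of it, one page at
 * a time.
 */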
void
xfs_buf_iomove(
	xfs_buf_t		*bp,
	size_t			boff,
	size_t			bsize,
	void			*data,
	xfs_buf_rw_t		mode)
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

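/*
 * Handling of buffer targets (buftargs).
 *
 * Drain the buffer LRU for a target when it is being quiesced: isolate
 * buffers whose only remaining reference is the LRU itself so they can
 * be disposed of, skipping any that are still referenced elsewhere.
 */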
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_move(item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int loop = 0;

	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				xfs_alert(btp->bt_mount,
"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
"Please run xfs_repair to determine the extent of the problem.",
					(long long)bp->b_bn);
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}
}
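
/*
 * Shrinker support: memory pressure decrements a buffer's b_lru_ref each
 * time the LRU is scanned, and only when the count reaches zero is the
 * buffer isolated for disposal.  Frequently used buffers therefore get
 * several passes of grace before being reclaimed.
 */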
static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_move(item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	LIST_HEAD(dispose);
	unsigned long		freed;
	unsigned long		nr_to_scan = sc->nr_to_scan;

	freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
				   &dispose, &nr_to_scan);

	while (!list_empty(&dispose)) {
		struct xfs_buf *bp;
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}

static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	return list_lru_count_node(&btp->bt_lru, sc->nid);
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);
	list_lru_destroy(&btp->bt_lru);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		sectorsize)
{
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s",
			sectorsize, name);
		return EINVAL;
	}

	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

	return 0;
}

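/*
 * When allocating the initial buffer target, the superblock has not yet
 * been read, so the only usable sector size is the logical sector size
 * of the underlying block device.
 */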
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;

	if (list_lru_init(&btp->bt_lru))
		goto error;

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

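/*
 * Add a buffer to a delayed write list.
 *
 * The buffer must be locked and must not be on another delwri list.
 * Neither this routine nor the list submission functions do any locking
 * of the list itself, so the caller is responsible for serialising access
 * to it.  Returns true if the buffer was queued, false if it was already
 * on a delwri list.
 */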
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}

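/*
 * Compare two buffers by disk address for list_sort().  The difference is
 * computed in 64 bits and only its sign is returned, because the return
 * value is a 32-bit int.
 */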
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

static int
__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,
	bool			wait)
{
	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;
	int			pinned = 0;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
	}

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
		bp->b_flags |= XBF_WRITE;

		if (!wait) {
			bp->b_flags |= XBF_ASYNC;
			list_del_init(&bp->b_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}

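/*
 * Write out a buffer list asynchronously.  Pinned buffers are skipped and
 * left on the list; the return value is the number of buffers skipped
 * because they were pinned.
 */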
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
}

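/*
 * Write out a buffer list synchronously: submit every buffer, wait for
 * I/O completion on each, and return the first error encountered.
 */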
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);
		error2 = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	return 0;

 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}