// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrew Morton
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
			 enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
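
/*
 * Returns if the page has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the PageDirty information is stale. If
 * any of the pages are locked, it is assumed they are locked for IO.
 */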
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!PageLocked(page));

	if (!page_has_buffers(page))
		return;

	if (PageWriteback(page))
		*writeback = true;

	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);
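
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */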
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
}

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}
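
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */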
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
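
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */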
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk().  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	put_page(page);
out:
	return ret;
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
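
/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */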
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);
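
/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */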
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
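
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), inode_has_buffers() and
 * invalidate_inode_buffers() are provided for the management of a list of
 * dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which introduces a requirement: all buffers on an
 * address_space's ->private_list must be backed by the same blockdev.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */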
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just wait after submitting the IO: repeatedly
 * scan the list, waiting on any locked buffer, until no locked buffers
 * remain.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

void emergency_thaw_bdev(struct super_block *sb)
{
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}
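
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */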
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Mark the page dirty, and set it dirty in the page cache, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 *
 * The caller must hold lock_page_memcg().
 */
void __set_page_dirty(struct page *page, struct address_space *mapping,
			     int warn)
{
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		__xa_set_mark(&mapping->i_pages, page_index(page),
				PAGECACHE_TAG_DIRTY);
	}
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
EXPORT_SYMBOL_GPL(__set_page_dirty);

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  It the page does
 * not have buffers then when they come into existence they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page
 * dirty bit, see a bunch of clean buffers and we'd end up with dirty
 * buffers/clean page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);

	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
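
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */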
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unlucky enough to be the only user of the buffers, so just
 * pitch them at the trash.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}
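
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */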
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry)
{
	struct buffer_head *bh, *head;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	long offset;
	struct mem_cgroup *memcg;

	if (retry)
		gfp |= __GFP_NOFAIL;

	memcg = get_mem_cgroup_from_page(page);
	memalloc_use_memcg(memcg);

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);
	}
out:
	memalloc_unuse_memcg();
	mem_cgroup_put(memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = i_size_read(bdev->bd_inode);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);
	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	page = find_or_create_page(inode->i_mapping, index, gfp_mask);

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						(sector_t)index << sizebits,
						size);
			goto done;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, true);

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock so that try_to_free_buffers() doesn't race with us.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
			size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

	/* Create a page with the proper size buffers.. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
}
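
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * i_pages lock and mapping->host->i_lock.
 */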
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before this
	 * point.  We catch the race to check if it is dirty twice!
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		struct address_space *mapping = NULL;

		lock_page_memcg(page);
		if (!TestSetPageDirty(page)) {
			mapping = page_mapping(page);
			if (mapping)
				__set_page_dirty(page, mapping, 0);
		}
		unlock_page_memcg(page);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);

	if (bh->b_page && bh->b_page->mapping)
		mapping_set_error(bh->b_page->mapping, -EIO);
	if (bh->b_assoc_map)
		mapping_set_error(bh->b_assoc_map, -EIO);
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}
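
/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */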
#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size, gfp);
	return bh;
}
EXPORT_SYMBOL(__getblk_gfp);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);
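
/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  The page cache can be allocated from non-movable area
 *  not to prevent page migration if you set gfp to zero.
 *  It returns NULL if the block was unreadable.
 */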
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

static bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return 1;
	}

	return 0;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state, b_state_old;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = bh->b_state;
	for (;;) {
		b_state_old = cmpxchg(&bh->b_state, b_state,
				      (b_state & ~BUFFER_FLAGS_DISCARD));
		if (b_state_old == b_state)
			break;
		b_state = b_state_old;
	}
	unlock_buffer(bh);
}
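
/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */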
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;
	unsigned int stop = length + offset;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > PAGE_SIZE || stop < length);

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == PAGE_SIZE)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, true);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
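
/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from return from this function until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we will free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway.
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */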
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
		count = pagevec_count(&pvec);
		for (i = 0; i < count; i++) {
			struct page *page = pvec.pages[i];

			if (!page_has_buffers(page))
				continue;
			/*
			 * We use page lock instead of bd_mapping->private_lock
			 * to pin buffers here since we can afford to sleep and
			 * it scales better than a global spinlock lock.
			 */
			lock_page(page);
			/* Recheck when the page is locked which pins bhs */
			if (!page_has_buffers(page))
				goto unlock_page;
			head = page_buffers(page);
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);

/*
 * Size is a power-of-two in the range 512..PAGE_SIZE,
 * and the case we care about most is PAGE_SIZE.
 *
 * So this *could* possibly be written with those
 * constraints in mind (relevant mostly if some
 * architecture has a slow bit-scan instruction)
 */
static inline int block_size_bits(unsigned int blocksize)
{
	return ilog2(blocksize);
}

static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
{
	BUG_ON(!PageLocked(page));

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
				     b_state);
	return page_buffers(page);
}
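
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by surrounding the page lock with
 * a blocking buffer lock for all the buffers.  For regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) a page with a locked buffer is simply
 * redirtied; for WB_SYNC_ALL we block on the buffer lock.
 */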
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	unsigned int blocksize, bbits;
	int nr_underway = 0;
	int write_flags = wbc_to_write_flags(wbc);

	head = create_page_buffers(page, inode,
					(1 << BH_Dirty)|(1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	bh = head;
	blocksize = bh->b_size;
	bbits = block_size_bits(blocksize);

	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
	last_block = (i_size_read(inode) - 1) >> bbits;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 *
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				clean_bdev_bh_alias(bh);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh, handler);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
					inode->i_write_hint, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write_endio(bh, handler);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
					inode->i_write_hint, wbc);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}
EXPORT_SYMBOL(__block_write_full_page);

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

static void
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
		struct iomap *iomap)
{
	loff_t offset = block << inode->i_blkbits;

	bh->b_bdev = iomap->bdev;

	/*
	 * Block points to offset in file we need to map, iomap contains
	 * the offset at which the map starts. If the map ends before the
	 * current block, then do not map the buffer and let the caller
	 * handle it.
	 */
	BUG_ON(offset >= iomap->offset + iomap->length);

	switch (iomap->type) {
	case IOMAP_HOLE:
		/*
		 * If the buffer is not up to date or beyond the current EOF,
		 * we need to mark it as new to ensure sub-block zeroing is
		 * executed if necessary.
		 */
		if (!buffer_uptodate(bh) ||
		    (offset >= i_size_read(inode)))
			set_buffer_new(bh);
		break;
	case IOMAP_DELALLOC:
		if (!buffer_uptodate(bh) ||
		    (offset >= i_size_read(inode)))
			set_buffer_new(bh);
		set_buffer_uptodate(bh);
		set_buffer_mapped(bh);
		set_buffer_delay(bh);
		break;
	case IOMAP_UNWRITTEN:
		/*
		 * For unwritten regions, we always need to ensure that regions
		 * in the block we are not writing to are zeroed. Mark the
		 * buffer as new to ensure this.
		 */
		set_buffer_new(bh);
		set_buffer_unwritten(bh);
		/* fall through */
	case IOMAP_MAPPED:
		if ((iomap->flags & IOMAP_F_NEW) ||
		    offset >= i_size_read(inode))
			set_buffer_new(bh);
		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
				inode->i_blkbits;
		set_buffer_mapped(bh);
		break;
	}
}

int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block, struct iomap *iomap)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = create_page_buffers(page, inode, 0);
	blocksize = head->b_size;
	bbits = block_size_bits(blocksize);

	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			if (get_block) {
				err = get_block(inode, block, bh, 1);
				if (err)
					break;
			} else {
				iomap_to_bh(inode, block, bh, iomap);
			}

			if (buffer_new(bh)) {
				clean_bdev_bh_alias(bh);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}

int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block)
{
	return __block_write_begin_int(page, pos, len, get_block, NULL);
}
EXPORT_SYMBOL(__block_write_begin);

static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	blocksize = bh->b_size;

	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}

/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * The filesystem needs to handle block truncation upon failure.
 */
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
EXPORT_SYMBOL(block_write_begin);

int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;

	start = pos & (PAGE_SIZE - 1);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so we
		 * don't have to worry about a readpage reading them and
		 * overwriting a partial write. However if we have encountered
		 * a short write and only partially written into a buffer, it
		 * will not be marked uptodate, so a readpage might come in and
		 * destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate page as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start+copied, start+len);
	}
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, page, start, start+copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);

int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	bool i_size_changed = false;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size cannot change under us
	 * because we hold i_rwsem.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = true;
	}

	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		mark_inode_dirty(inode);
	return copied;
}
EXPORT_SYMBOL(generic_write_end);

/*
 * block_is_partially_uptodate checks whether buffers within a page are
 * uptodate or not.
 *
 * Returns true if all buffers which correspond to a file portion
 * we want to read are uptodate.
 */
int block_is_partially_uptodate(struct page *page, unsigned long from,
					unsigned long count)
{
	unsigned block_start, block_end, blocksize;
	unsigned to;
	struct buffer_head *bh, *head;
	int ret = 1;

	if (!page_has_buffers(page))
		return 0;

	head = page_buffers(page);
	blocksize = head->b_size;
	to = min_t(unsigned, PAGE_SIZE - from, count);
	to = from + to;
	if (from < blocksize && to > PAGE_SIZE - blocksize)
		return 0;

	bh = head;
	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to) {
			if (!buffer_uptodate(bh)) {
				ret = 0;
				break;
			}
			if (block_end >= to)
				break;
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}
EXPORT_SYMBOL(block_is_partially_uptodate);

/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize, bbits;
	int nr, i;
	int fully_mapped = 1;

	head = create_page_buffers(page, inode, 0);
	blocksize = head->b_size;
	bbits = block_size_bits(blocksize);

	iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err)
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				zero_user(page, i * blocksize, blocksize);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well. But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(REQ_OP_READ, 0, bh);
	}
	return 0;
}
EXPORT_SYMBOL(block_read_full_page);

/*
 * utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;

	err = inode_newsize_ok(inode, size);
	if (err)
		goto out;

	err = pagecache_write_begin(NULL, mapping, size, 0,
				    AOP_FLAG_CONT_EXPAND, &page, &fsdata);
	if (err)
		goto out;

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}
EXPORT_SYMBOL(generic_cont_expand_simple);

static int cont_expand_zero(struct file *file, struct address_space *mapping,
			    loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned int blocksize = i_blocksize(inode);
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
		zerofrom = curpos & ~PAGE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len, 0,
					    &page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;

		balance_dirty_pages_ratelimited(mapping);

		if (fatal_signal_pending(current)) {
			err = -EINTR;
			goto out;
		}
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_MASK;
		/* if we will expand the thing last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len, 0,
					    &page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}

/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned int blocksize = i_blocksize(inode);
	unsigned int zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		return err;

	zerofrom = *bytes & ~PAGE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	return 0;
}
EXPORT_SYMBOL(block_commit_write);
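
/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * truncate writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 *
 * Direct callers of this function should protect against filesystem freezing
 * using sb_start_pagefault() - sb_end_pagefault() functions.
 */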
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
			 get_block_t get_block)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	unsigned long end;
	loff_t size;
	int ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		end = size & ~PAGE_MASK;
	else
		end = PAGE_SIZE;

	ret = __block_write_begin(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

	if (unlikely(ret < 0))
		goto out_unlock;
	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(block_page_mkwrite);

/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}

/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 * The filesystem needs to handle block truncation upon failure.
 */
int nobh_write_begin(struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		ret = __block_write_begin(page, pos, len, get_block);
		if (unlikely(ret))
			goto out_release;
		return ret;
	}

	if (PageMappedToDisk(page))
		return 0;

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs. In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, false);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			clean_bdev_bh_alias(bh);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			zero_user_segments(page, block_start, from,
							to, block_end);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(REQ_OP_READ, 0, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);
	/*
	 * Error recovery is a bit difficult. We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	attach_nobh_buffers(page, head);
	page_zero_new_buffers(page, from, to);

out_release:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);

int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;
	BUG_ON(fsdata != NULL && page_has_buffers(page));

	if (unlikely(copied < len) && head)
		attach_nobh_buffers(page, head);
	if (page_has_buffers(page))
		return generic_write_end(file, mapping, pos, len,
					copied, page, fsdata);

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	put_page(page);

	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);

/*
 * nobh_writepage() - based on block_full_write_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this  - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc,
					      end_buffer_async_write);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);

int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = i_blocksize(inode);
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		unlock_page(page);
		put_page(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped? It's a hole - nothing to do */
	if (!buffer_mapped(&map_bh))
		goto unlock;

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		if (err) {
			put_page(page);
			goto out;
		}
		lock_page(page);
		if (!PageUptodate(page)) {
			err = -EIO;
			goto unlock;
		}
		if (page_has_buffers(page))
			goto has_buffers;
	}
	zero_user(page, offset, length);
	set_page_dirty(page);
	err = 0;

unlock:
	unlock_page(page);
	put_page(page);
out:
	return err;
}
EXPORT_SYMBOL(nobh_truncate_page);

int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = i_blocksize(inode);
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	put_page(page);
out:
	return err;
}
EXPORT_SYMBOL(block_truncate_page);
2915
2916
2917
2918
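/*
 * The generic ->writepage function for buffer-backed address_spaces
 */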
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc,
					       end_buffer_async_write);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0, PAGE_SIZE);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
	return __block_write_full_page(inode, page, get_block, wbc,
							end_buffer_async_write);
}
EXPORT_SYMBOL(block_write_full_page);

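/*
 * generic_block_bmap - map a file block to its on-disk block number via
 * @get_block, for the FIBMAP ioctl; an unmapped block reports back as 0.
 */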
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	struct buffer_head tmp = {
		.b_size = i_blocksize(inode),
	};

	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
EXPORT_SYMBOL(generic_block_bmap);

static void end_bio_bh_io_sync(struct bio *bio)
{
	struct buffer_head *bh = bio->bi_private;

	if (unlikely(bio_flagged(bio, BIO_QUIET)))
		set_bit(BH_Quiet, &bh->b_state);

	bh->b_end_io(bh, !bio->bi_status);
	bio_put(bio);
}

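/*
 * This allows us to do IO even on the odd last sectors of a device, even if
 * the block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and for reads clear
 * the truncated tail of the last bvec, so stale device contents are never
 * exposed past end-of-device.
 */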
void guard_bio_eod(int op, struct bio *bio)
{
	sector_t maxsector;
	struct bio_vec *bvec = bio_last_bvec_all(bio);
	unsigned truncated_bytes;
	struct hd_struct *part;

	rcu_read_lock();
	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (part)
		maxsector = part_nr_sects_read(part);
	else
		maxsector = get_capacity(bio->bi_disk);
	rcu_read_unlock();

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	/* Uhhuh. We've got a bio that straddles the device size! */
	truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);

	/*
	 * The bio spans end-of-device across more than the last segment;
	 * just return and let it be.  Clipping the last bvec alone cannot
	 * handle that case.
	 */
	if (truncated_bytes > bvec->bv_len)
		return;

	/* Truncate the bio.. */
	bio->bi_iter.bi_size -= truncated_bytes;
	bvec->bv_len -= truncated_bytes;

	/* ..and clear the end of the buffer for reads */
	if (op == REQ_OP_READ) {
		struct bio_vec bv;

		mp_bvec_last_segment(bvec, &bv);
		zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
				truncated_bytes);
	}
}

static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
			 enum rw_hint write_hint, struct writeback_control *wbc)
{
	struct bio *bio;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
	bio->bi_write_hint = write_hint;

	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
	BUG_ON(bio->bi_iter.bi_size != bh->b_size);

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	/* Take care of bh's that straddle the end of the device */
	guard_bio_eod(op, bio);

	if (buffer_meta(bh))
		op_flags |= REQ_META;
	if (buffer_prio(bh))
		op_flags |= REQ_PRIO;
	bio_set_op_attrs(bio, op, op_flags);

	if (wbc) {
		wbc_init_bio(wbc, bio);
		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
	}

	submit_bio(bio);
	return 0;
}

int submit_bh(int op, int op_flags, struct buffer_head *bh)
{
	return submit_bh_wbc(op, op_flags, bh, 0, NULL);
}
EXPORT_SYMBOL(submit_bh);

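/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @op: whether to %READ or %WRITE
 * @op_flags: req_flag_bits
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %REQ_OP_READ or a
 * %REQ_OP_WRITE.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a write
 * request, and any buffer that appears to be up-to-date when doing a read
 * request.  Further it marks as clean buffers that are processed for
 * writing (the buffer cache won't assume that they are actually clean
 * until the buffer gets unlocked).
 *
 * All of the buffers must be for the same device, and must also be
 * correctly sized for that device.
 *
 * Illustrative use (the pattern used by block_truncate_page() above):
 *
 *	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */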
void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (!trylock_buffer(bh))
			continue;
		if (op == WRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(op, op_flags, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(op, op_flags, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}
EXPORT_SYMBOL(ll_rw_block);

void write_dirty_buffer(struct buffer_head *bh, int op_flags)
{
	lock_buffer(bh);
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);
		return;
	}
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(REQ_OP_WRITE, op_flags, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);

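/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref
 * on the buffer_head.
 *
 * Illustrative caller pattern (assumed, not from this file):
 *
 *	mark_buffer_dirty(bh);
 *	...
 *	err = sync_dirty_buffer(bh);	(i.e. __sync_dirty_buffer(bh, REQ_SYNC))
 */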
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
EXPORT_SYMBOL(__sync_dirty_buffer);

int sync_dirty_buffer(struct buffer_head *bh)
{
	return __sync_dirty_buffer(bh, REQ_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);

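/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Drat.
 *
 * To avoid this, the entire page becomes clean.
 *
 * try_to_free_buffers() is non-blocking.
 */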
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise all the 'clean buffer' states.
	 */
	if (ret)
		cancel_dirty_page(page);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);

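/*
 * There are no bdflush tunables left.  But distributions are still running
 * obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */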
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep __read_mostly;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static unsigned long max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
		return;
	__this_cpu_write(bh_accounting.ratelimit, 0);
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		preempt_disable();
		__this_cpu_inc(bh_accounting.nr);
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__this_cpu_dec(bh_accounting.nr);
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);

static int buffer_exit_cpu_dead(unsigned int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
	per_cpu(bh_accounting, cpu).nr = 0;
	return 0;
}

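/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
 * with the buffer locked, if not.
 */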
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

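/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */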
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, 0, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);

void __init buffer_init(void)
{
	unsigned long nrpages;
	int ret;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
					NULL, buffer_exit_cpu_dead);
	WARN_ON(ret < 0);
}