/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/cleancache.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sleep_on_buffer(void *word)
{
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

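/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */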
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

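/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.  Note that unlock_buffer() only uses the bh's address
 * for hashing after unlocking, so it does not actually touch the bh
 * itself, and a race here is benign.
 */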
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

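/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */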
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

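/*
 * Invalidate clean unused buffers and pagecache on a block device.
 */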
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();
	invalidate_mapping_pages(mapping, 0, -1);

	cleancache_flush_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

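/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */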
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_flusher_threads(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

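/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */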
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

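/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */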
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

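/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 */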
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

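/*
 * A filesystem can attach "associated" buffers (typically metadata such
 * as indirect blocks) to an inode's mapping via mark_buffer_dirty_inode();
 * they are kept on mapping->private_list, protected by the private_lock
 * of the blockdev mapping which backs them.  fsync_buffers_list() walks
 * that list to implement the metadata side of fsync().
 *
 * The buffer's backing address_space's private_lock must be held when
 * calling __remove_assoc_queue().
 */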
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

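/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */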
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
	char b[BDEVNAME_SIZE];
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %s\n",
		       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
	iterate_supers(do_thaw_one, NULL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

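/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */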
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

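/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */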
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

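/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */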
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

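/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */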
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

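/*
 * Add a page to the dirty page list.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page
 * dirty bit, see a bunch of clean buffers and we'd end up with dirty
 * buffers/clean page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 */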
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

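/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */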
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);

		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);

				write_dirty_buffer(bh, WRITE_SYNC);

				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);

		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

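/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */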
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

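/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */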
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

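/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */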
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_size = size;

		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;

no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	if (!retry)
		return NULL;

	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

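/*
 * Initialise the state of a blockdev page's buffers.
 */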
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

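/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */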
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

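/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */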
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;

	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
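	/* Size must be multiple of hard sectorsize */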
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

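/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock
 * and mapping->tree_lock.
 */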
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page)) {
			struct address_space *mapping = page_mapping(page);
			if (mapping)
				__set_page_dirty(page, mapping, 0);
		}
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

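/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */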
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

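/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */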
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

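/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */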
#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

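/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */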
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;

	check_irqs_on();
	bh_lru_lock();
	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 =
				__this_cpu_read(bh_lrus.bhs[in]);

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

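/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */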
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

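/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */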
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

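/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 */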
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

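/*
 * Do async read-ahead on a buffer..
 */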
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

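/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */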
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);

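/*
 * Release all buffers cached in this CPU's bh LRU.  Runs on each CPU via
 * on_each_cpu() from invalidate_bh_lrus().
 */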
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
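		/*
		 * This catches illegal uses and preserves the offset:
		 */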
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

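/*
 * Called when truncating a buffer on a page completely.
 */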
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

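/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */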
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);

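/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */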
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

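/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */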
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);

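/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 *
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 */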
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;
	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
			WRITE_SYNC : WRITE);

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	do {
		if (block > last_block) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			   buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;

		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh, handler);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		end_page_writeback(page);
	}
	return err;

recover:
	bh = head;

	do {
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write_endio(bh, handler);
		} else {
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}

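/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking).  And clear the new bit.
 */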
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block)
{
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page,
						to, block_end,
						block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}
EXPORT_SYMBOL(__block_write_begin);

static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}

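/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * The filesystem needs to handle block truncation upon failure.
 */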
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	int status;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
EXPORT_SYMBOL(block_write_begin);

int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;

	start = pos & (PAGE_CACHE_SIZE - 1);

	if (unlikely(copied < len)) {
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start+copied, start+len);
	}
	flush_dcache_page(page);

	__block_commit_write(inode, page, start, start+copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);

int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int i_size_changed = 0;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		i_size_changed = 1;
	}

	unlock_page(page);
	page_cache_release(page);

	if (i_size_changed)
		mark_inode_dirty(inode);

	return copied;
}
EXPORT_SYMBOL(generic_write_end);

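/*
 * block_is_partially_uptodate checks whether buffers within a page are
 * uptodate or not.
 *
 * Returns true if all buffers which correspond to a file portion
 * we want to read are uptodate.
 */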
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
					unsigned long from)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	unsigned to;
	struct buffer_head *bh, *head;
	int ret = 1;

	if (!page_has_buffers(page))
		return 0;

	blocksize = 1 << inode->i_blkbits;
	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
	to = from + to;
	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
		return 0;

	head = page_buffers(page);
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to) {
			if (!buffer_uptodate(bh)) {
				ret = 0;
				break;
			}
			if (block_end >= to)
				break;
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}
EXPORT_SYMBOL(block_is_partially_uptodate);

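/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */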
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	BUG_ON(!PageLocked(page));
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err)
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				zero_user(page, i * blocksize, blocksize);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}

			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(READ, bh);
	}
	return 0;
}
EXPORT_SYMBOL(block_read_full_page);

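/*
 * utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */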
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;

	err = inode_newsize_ok(inode, size);
	if (err)
		goto out;

	err = pagecache_write_begin(NULL, mapping, size, 0,
				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
				&page, &fsdata);
	if (err)
		goto out;

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}
EXPORT_SYMBOL(generic_cont_expand_simple);

static int cont_expand_zero(struct file *file, struct address_space *mapping,
			    loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_CACHE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;

		balance_dirty_pages_ratelimited(mapping);
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		/* if we will expand the thing last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}

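/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 */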
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	unsigned zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		return err;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	return 0;
}
EXPORT_SYMBOL(block_commit_write);

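/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied.  Hence we
 * must be careful to check for EOF conditions here.  We set the page up
 * correctly for a written area of the file and return 0 once the page is
 * dirtied and set up to be written to disk; on success the page is left
 * locked, and a reference is held on it.
 *
 * We are not allowed to take the i_mutex here, so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.
 * Because truncate writes the inode size before removing pages, once we have
 * the page lock we can determine safely if the page is beyond EOF.
 *
 * If the filesystem is being frozen after the page was dirtied, -EAGAIN is
 * returned; direct callers should use vfs_check_frozen() first so that the
 * page fault does not busy-loop until the fs is thawed.
 */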
int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
			 get_block_t get_block)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	unsigned long end;
	loff_t size;
	int ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
		end = size & ~PAGE_CACHE_MASK;
	else
		end = PAGE_CACHE_SIZE;

	ret = __block_write_begin(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

	if (unlikely(ret < 0))
		goto out_unlock;

	set_page_dirty(page);
	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
		ret = -EAGAIN;
		goto out_unlock;
	}
	wait_on_page_writeback(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(__block_page_mkwrite);

int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		   get_block_t get_block)
{
	int ret;
	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;

	/*
	 * This check is racy but catches the common case. The check in
	 * __block_page_mkwrite() is reliable.
	 */
	vfs_check_frozen(sb, SB_FREEZE_WRITE);
	ret = __block_page_mkwrite(vma, vmf, get_block);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL(block_page_mkwrite);

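/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So the buffer does not need to
 * be released after I/O completes - use an end_io handler which does not
 * touch the bh after unlocking it.
 */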
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}

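/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */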
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}

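/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 * The filesystem needs to handle block truncation upon failure.
 */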
int nobh_write_begin(struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		ret = __block_write_begin(page, pos, len, get_block);
		if (unlikely(ret))
			goto out_release;
		return ret;
	}

	if (PageMappedToDisk(page))
		return 0;

	head = alloc_page_buffers(page, blocksize, 0);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			zero_user_segments(page, block_start, from,
							to, block_end);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);

	attach_nobh_buffers(page, head);
	page_zero_new_buffers(page, from, to);

out_release:
	unlock_page(page);
	page_cache_release(page);
	*pagep = NULL;

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);

int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;
	BUG_ON(fsdata != NULL && page_has_buffers(page));

	if (unlikely(copied < len) && head)
		attach_nobh_buffers(page, head);
	if (page_has_buffers(page))
		return generic_write_end(file, mapping, pos, len,
					copied, page, fsdata);

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);

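/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */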
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this  - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc,
					      end_buffer_async_write);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);

int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		unlock_page(page);
		page_cache_release(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped? It's a hole - nothing to do */
	if (!buffer_mapped(&map_bh))
		goto unlock;

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		if (err) {
			page_cache_release(page);
			goto out;
		}
		lock_page(page);
		if (!PageUptodate(page)) {
			err = -EIO;
			goto unlock;
		}
		if (page_has_buffers(page))
			goto has_buffers;
	}
	zero_user(page, offset, length);
	set_page_dirty(page);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(nobh_truncate_page);

int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(block_truncate_page);

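/*
 * The generic ->writepage function for buffer-backed address_spaces.
 * This form passes in the end_io handler used to finish the IO.
 */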
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc, bh_end_io_t *handler)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc,
					       handler);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	return __block_write_full_page(inode, page, get_block, wbc, handler);
}
EXPORT_SYMBOL(block_write_full_page_endio);

/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, get_block, wbc,
					   end_buffer_async_write);
}
EXPORT_SYMBOL(block_write_full_page);

2881sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2882 get_block_t *get_block)
2883{
2884 struct buffer_head tmp;
2885 struct inode *inode = mapping->host;
2886 tmp.b_state = 0;
2887 tmp.b_blocknr = 0;
2888 tmp.b_size = 1 << inode->i_blkbits;
2889 get_block(inode, block, &tmp, 0);
2890 return tmp.b_blocknr;
2891}
2892EXPORT_SYMBOL(generic_block_bmap);
2893
2894static void end_bio_bh_io_sync(struct bio *bio, int err)
2895{
2896 struct buffer_head *bh = bio->bi_private;
2897
2898 if (err == -EOPNOTSUPP) {
2899 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2900 }
2901
2902 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2903 set_bit(BH_Quiet, &bh->b_state);
2904
2905 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2906 bio_put(bio);
2907}
2908
int submit_bh(int rw, struct buffer_head *bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (rw & WRITE))
		clear_buffer_write_io_error(bh);

	/*
	 * From here on down, it's all bio -- do the initial mapping;
	 * submit_bio -> generic_make_request may further map this bio
	 * around.
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(submit_bh);
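
/*
 * Example (editor's sketch): synchronously reading one mapped buffer
 * with submit_bh().  The buffer must be locked and mapped on entry;
 * the extra reference taken here is dropped by end_buffer_read_sync(),
 * which also unlocks the buffer:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */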

/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The
 * third %READA option is described in the documentation for
 * generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a
 * write request, and any buffer that appears to be up-to-date when doing
 * a read request.  Any buffer that ll_rw_block() does lock is submitted
 * for I/O and unlocked by the end_io handler, so callers must use
 * wait_on_buffer() and then test buffer_uptodate() to find out whether
 * the I/O actually succeeded.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (!trylock_buffer(bh))
			continue;
		if (rw == WRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}
EXPORT_SYMBOL(ll_rw_block);
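
/*
 * Example (editor's sketch): using ll_rw_block() as read-ahead over a
 * batch of buffers.  Buffers that are already up to date, or that
 * cannot be locked, are silently skipped, so the caller must wait on
 * and re-check the buffer it actually needs:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		return -EIO;
 */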

void write_dirty_buffer(struct buffer_head *bh, int rw)
{
	lock_buffer(bh);
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);
		return;
	}
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(rw, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);
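
/*
 * Example (editor's sketch): fire-and-forget writeback of a buffer the
 * caller is finished with.  write_dirty_buffer() takes its own
 * reference for the I/O, so the caller may drop its reference right
 * away:
 *
 *	mark_buffer_dirty(bh);
 *	write_dirty_buffer(bh, WRITE);
 *	brelse(bh);
 */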

/*
 * For a data-integrity writeout, we need to wait upon any in-progress
 * I/O and then start new I/O and wait upon that too.  The caller must
 * hold a reference on the buffer_head.
 */
int __sync_dirty_buffer(struct buffer_head *bh, int rw)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(rw, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
EXPORT_SYMBOL(__sync_dirty_buffer);

int sync_dirty_buffer(struct buffer_head *bh)
{
	return __sync_dirty_buffer(bh, WRITE_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);
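
/*
 * Example (editor's sketch): a data-integrity update of a metadata
 * block, e.g. a superblock; "new_super" is a hypothetical in-memory
 * copy.  The caller must hold a reference on the buffer across the
 * call:
 *
 *	memcpy(bh->b_data, new_super, bh->b_size);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		return err;
 */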

/*
 * try_to_free_buffers() checks whether all the buffers on this
 * particular page are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
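
/*
 * Example (editor's sketch): try_to_free_buffers() is the natural
 * building block for a buffer-backed filesystem's ->releasepage
 * method:
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */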

/*
 * There are no bdflush tunables left.  But distributions are still
 * running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future
 * kernel.  The `flush-X' kernel threads fully replace bdflush daemons
 * and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
		return;
	__this_cpu_write(bh_accounting.ratelimit, 0);
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		preempt_disable();
		__this_cpu_inc(bh_accounting.nr);
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();
	__this_cpu_dec(bh_accounting.nr);
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
	per_cpu(bh_accounting, cpu).nr = 0;
}

static int buffer_cpu_notify(struct notifier_block *self,
			     unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false, with the buffer
 * locked, if not.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.  The buffer must be
 * locked on entry; it is unlocked by the completion handler.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
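
/*
 * Example (editor's sketch): bh_uptodate_or_lock() and bh_submit_read()
 * combine into the common "read this block unless it is already cached"
 * pattern.  On the slow path the buffer comes back locked, exactly as
 * bh_submit_read() requires:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			return err;
 *	}
 */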

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
