/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <dm-devel@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS 8

#define DM_BUFIO_MEMORY_PERCENT 2
#define DM_BUFIO_VMALLOC_PERCENT 25
#define DM_BUFIO_WRITEBACK_PERCENT 75

/*
 * Check buffer ages in this interval (seconds).
 */
#define DM_BUFIO_WORK_TIMER_SECS 10

/*
 * Free buffers when they are older than this (seconds).
 */
#define DM_BUFIO_DEFAULT_AGE_SECS 60

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the buffer is larger, dm-io is used to do the I/O.
 */
#define DM_BUFIO_INLINE_VECS 16

/*
 * Buffer hash.
 */
#define DM_BUFIO_HASH_BITS 20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))
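
/*
 * A note on the hash above (added for clarity, not in the original source):
 * with DM_BUFIO_HASH_BITS == 20 the table has 1 << 20 buckets, so the
 * hlist_head array allocated in dm_bufio_client_create() is
 * sizeof(struct hlist_head) << 20 bytes (8 MiB on 64-bit), which is why it
 * is allocated with vmalloc() rather than kmalloc().  The hash folds the
 * high bits of the block number into the low bits before masking, so
 * sequential block numbers land in distinct buckets.
 */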

/*
 * Don't try to use kmem_cache_alloc for blocks larger than
 * DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT; use __get_free_pages or vmalloc instead
 * (see alloc_buffer_data below).
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN 0
#define LIST_DIRTY 1
#define LIST_SIZE 2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field.  When the write finishes,
 *	the buffer cannot be relinked immediately (we are in interrupt
 *	context), so it is relinked by __relink_lru(LIST_CLEAN) on the next
 *	access or shrink.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	struct hlist_head *cache_hash;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING 0
#define B_WRITING 1
#define B_DIRTY 2

/*
 * Describes how the buffer data was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data below.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct hlist_node hash_list;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_CLEAN or LIST_DIRTY */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request() (!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*
 * On kernels with voluntary preemption the long LRU walks below reschedule
 * explicitly; with other preemption models this is a no-op.
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
# define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
# define dm_bufio_cond_resched() do { } while (0)
#endif

/*----------------------------------------------------------------
 * Global parameters and statistics, exported as module parameters.
 *--------------------------------------------------------------*/

/*
 * The default cache size, computed at module init from available memory.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user (module parameter max_cache_size_bytes).
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size, because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed the cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout (seconds).
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size divided by the number of clients.
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client, dm_bufio_client_count
 * and dm_bufio_all_clients.
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate the per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = dm_bufio_cache_size;

	barrier();

	/*
	 * Use the default if the cache size is set to 0 and report the
	 * actual cache size back through the module parameter.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M), so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages: memory fragmentation
 * won't have a fatal effect here, it just causes flushes of some other
 * buffers and more I/O will be performed.  Don't use __get_free_pages if
 * it always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc; this is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;
	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate a buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}

/*
 * Free a buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link the buffer to the hash list and the clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
	b->last_accessed = jiffies;
}

/*
 * Unlink the buffer from the hash list and the dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	hlist_del(&b->hash_list);
	list_del(&b->lru_list);
}

/*
 * Place the buffer at the head of the dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_del(&b->lru_list);
	list_add(&b->lru_list, &c->lru[dirty]);
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * The bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O.  The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine: relay the result to the buffer's bio end_io
 * callback.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE, ptr is page-aligned.
	 * If len < PAGE_SIZE, the buffer doesn't cross a page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear the B_WRITING bit and wake anyone who was waiting
 * on it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * This function is called when wait_on_bit is actually waiting.
 */
static int do_io_schedule(void *word)
{
	io_schedule();

	return 0;
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it.  We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock(&b->state, B_WRITING,
			 do_io_schedule, TASK_UNINTERRUPTIBLE);

	submit_io(b, WRITE, b->block, write_endio);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b);
	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}

/*
 * Wait for some other thread to free a buffer.
 *
 * NOTE: this function is entered with c->lock held, drops it and regains
 * it before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	set_task_state(current, TASK_RUNNING);
	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer.  If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b);
		dm_bufio_cond_resched();
	}
}

/*
 * Get the writeback threshold and the buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (dm_bufio_cache_size != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < DM_BUFIO_MIN_BUFFERS)
		buffers = DM_BUFIO_MIN_BUFFERS;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}

/*
 * Check if we're over the watermarks: free unclaimed buffers while we are
 * above limit_buffers, and start background writeback if the dirty list
 * is longer than threshold_buffers.
 */
static void __check_watermark(struct dm_bufio_client *c)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1);
}

/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;
	struct hlist_node *hn;

	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
			     hash_list) {
		dm_bufio_cond_resched();
		if (b->block == block)
			return b;
	}

	return NULL;
}

/*----------------------------------------------------------------
 * Getting buffers and releasing them
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;

	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read here: dm_bufio_get() must never block on I/O, so a buffer
	 * that is still being read is reported as a cache miss.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_READING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is very similar, except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit);
	dm_bufio_unlock(c);

	if (!b)
		return b;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}
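
/*
 * Get a buffer only if it is already in the cache.  Returns the block's
 * data and sets *bp if the block is resident and not currently under read
 * I/O; returns NULL otherwise.  No I/O is ever issued on this path.
 */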
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);
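
/*
 * Read the block into the cache if necessary and return its data; on a
 * read error an ERR_PTR() is returned instead.  May sleep, so the caller
 * must not be in the middle of submitting bios (dm_bufio_in_request()).
 */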
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);
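
/*
 * Like dm_bufio_read, but the block is not read from disk: the caller is
 * expected to overwrite the whole buffer and mark it dirty afterwards.
 */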
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
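
/*
 * Start reading n_blocks blocks beginning at 'block' into the cache
 * without waiting for the reads to complete.  Blocks that are already
 * cached are skipped; blocks whose buffers cannot be allocated
 * opportunistically are silently dropped.
 */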
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
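
/*
 * Drop a reference to the buffer.  When the last reference is dropped,
 * waiters for a free buffer are woken, and a buffer that hit an I/O error
 * (and has no pending I/O) is evicted so that the error is not cached.
 */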
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer.  There is no point in
		 * caching an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
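
/*
 * Mark the buffer dirty and move it to the dirty LRU.  The data is written
 * back later by dm_bufio_write_dirty_buffers{,_async} or by background
 * writeback from __check_watermark.
 */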
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
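
/*
 * Start writing all dirty buffers asynchronously, without waiting for the
 * writes to complete.
 */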
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);
	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written
 * asynchronously and simultaneously (so that the block layer can merge
 * the writes) and only then waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the buffers walked and, once
		 * the count exceeds the number of dirty buffers, we stop
		 * dropping the lock and simply wait in place, which bounds
		 * the number of restarts.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty flush to the device, so that its hardware
 * write cache is flushed.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = REQ_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at the new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the
 * buffer in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but do not relink it, because that other user needs to have
 * the buffer at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a moment?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b);
	if (b->hold_count == 1) {
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock(&b->state, B_WRITING,
				 do_io_schedule, TASK_UNINTERRUPTIBLE);
		/*
		 * Relink the buffer to "new_block" so that write_callback
		 * sees "new_block" as the block number.  After the write,
		 * link the buffer back to old_block.  All of this must be
		 * done under the bufio lock, so that the block number change
		 * isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
			   (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
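
/*
 * The per-buffer auxiliary data requested at client creation (aux_size)
 * is allocated immediately after struct dm_buffer in the same kmalloc
 * block, so it is simply the memory following the structure.
 */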
void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * Test if the buffer is unused and too old, and free it if so.  If the
 * allocation that triggered the shrink does not allow I/O, buffers that
 * are being read, written or are dirty are kept.  Held buffers are always
 * kept.  Returns 0 if the buffer was freed, 1 if it was retained.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
				unsigned long max_jiffies)
{
	if (jiffies - b->last_accessed < max_jiffies)
		return 1;

	if (!(gfp & __GFP_IO)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return 1;
	}

	if (b->hold_count)
		return 1;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return 0;
}

static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   struct shrink_control *sc)
{
	int l;
	struct dm_buffer *b, *tmp;

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
			    !--nr_to_scan)
				return;
		dm_bufio_cond_resched();
	}
}

static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct dm_bufio_client *c =
	    container_of(shrinker, struct dm_bufio_client, shrinker);
	unsigned long r;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (sc->gfp_mask & __GFP_IO)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return !nr_to_scan ? 0 : -1;

	if (nr_to_scan)
		__scan(c, nr_to_scan, sc);

	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	if (r > INT_MAX)
		r = INT_MAX;

	dm_bufio_unlock(c);

	return r;
}
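
/*
 * Shrinker contract as implemented above: when sc->nr_to_scan is zero the
 * callback only reports the number of buffers on the two LRU lists; when
 * it is non-zero, up to that many buffers are reclaimed first.  If the
 * allocation that triggered the shrink does not allow I/O and the client
 * lock cannot be taken without blocking, the callback returns 0 (for a
 * count query) or -1 (for a scan request) so the VM skips this shrinker.
 */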

/*
 * Create the buffering interface.
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
	if (!c->cache_hash) {
		r = -ENOMEM;
		goto bad_hash;
	}

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		INIT_HLIST_HEAD(&c->cache_hash[i]);

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.shrink = shrink;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	vfree(c->cache_hash);
bad_hash:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
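
/*
 * Illustrative client usage (a sketch added for documentation; it is not
 * part of the original file, and the block device, block size and block
 * number below are hypothetical):
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *bp;
 *	void *data;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	data = dm_bufio_read(c, 0, &bp);
 *	if (!IS_ERR(data)) {
 *		memset(data, 0, dm_bufio_get_block_size(c));
 *		dm_bufio_mark_buffer_dirty(bp);
 *		dm_bufio_release(bp);
 *		dm_bufio_write_dirty_buffers(c);
 *	}
 *
 *	dm_bufio_client_destroy(c);
 */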

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		BUG_ON(!hlist_empty(&c->cache_hash[i]));

	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	vfree(c->cache_hash);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static void cleanup_old_buffers(void)
{
	unsigned long max_age = dm_bufio_max_age;
	struct dm_bufio_client *c;

	barrier();

	if (max_age > ULONG_MAX / HZ)
		max_age = ULONG_MAX / HZ;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		if (!dm_bufio_trylock(c))
			continue;

		while (!list_empty(&c->lru[LIST_CLEAN])) {
			struct dm_buffer *b;
			b = list_entry(c->lru[LIST_CLEAN].prev,
				       struct dm_buffer, lru_list);
			if (__cleanup_old_buffer(b, 0, max_age * HZ))
				break;
			dm_bufio_cond_resched();
		}

		dm_bufio_unlock(c);
		dm_bufio_cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Also cap the default cache size by a fraction of the vmalloc
	 * address space, because the largest buffers are vmalloced.
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");