/*
 * Copyright (C) Red Hat, Inc.
 *
 * Author: Mikulas Patocka <dm-devel@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the size of the cache to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS 8

#define DM_BUFIO_MEMORY_PERCENT 2
#define DM_BUFIO_VMALLOC_PERCENT 25
#define DM_BUFIO_WRITEBACK_PERCENT 75

/*
 * Check buffer ages in this interval (seconds).
 */
#define DM_BUFIO_WORK_TIMER_SECS 30

/*
 * Free buffers when they are older than this (seconds).
 */
#define DM_BUFIO_DEFAULT_AGE_SECS 300

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the block size is larger, dm-io is used to do the I/O.
 */
#define DM_BUFIO_INLINE_VECS 16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN 0
#define LIST_DIRTY 1
#define LIST_SIZE 2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING 0
#define B_WRITING 1
#define B_DIRTY 2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	struct stack_trace stack_trace;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request() (!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout.
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count.
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_trace.nr_entries = 0;
	b->stack_trace.max_entries = MAX_STACK;
	b->stack_trace.entries = b->stack_entries;
	b->stack_trace.skip = 2;
	save_stack_trace(&b->stack_trace);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so
	 * that all allocations done by this process (including pagetables)
	 * are done as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY)
		noio_flag = memalloc_noio_save();

	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		memalloc_noio_restore(noio_flag);

	return ptr;
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	memset(&b->stack_trace, 0, sizeof(b->stack_trace));
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller boundary;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O.  The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly by bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}

static void inline_endio(struct bio *bio, int error)
{
	bio_end_io_t *end_fn = bio->bi_private;

	/*
	 * Reset the bio to free any attached resources
	 * (e.g. bio integrity profiles).
	 */
	bio_reset(bio);

	end_fn(bio, error);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = inline_endio;
	/*
	 * Use of .bi_private isn't a problem here because
	 * the dm_buffer's inline bio is local to bufio.
	 */
	b->bio.bi_private = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  offset_in_page(ptr))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	if (!write_list)
		submit_io(b, WRITE, b->block, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, b->block, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	set_task_state(current, TASK_RUNNING);
	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in cases all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
		if (mutex_trylock(&dm_bufio_clients_lock)) {
			__cache_size_refresh();
			mutex_unlock(&dm_bufio_clients_lock);
		}
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = mult_frac(buffers,
				       DM_BUFIO_WRITEBACK_PERCENT, 100);
}

/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", we cannot insert a new buffer and we
 * just free some unclaimed buffers instead.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;

	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_get and dm_bufio_prefetch in the
	 * same routine, we'd deadlock waiting for the read to complete.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_READING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
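
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * typically reads a block, modifies it, marks it dirty and releases it.
 * The client "c" and the block number "block" are assumed to come from
 * the caller.
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, block, &bp);
 *	if (!IS_ERR(data)) {
 *		memset(data, 0, dm_bufio_get_block_size(c));
 *		dm_bufio_mark_buffer_dirty(bp);
 *		dm_bufio_release(bp);
 *	}
 */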

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
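
/*
 * Illustrative sketch (not part of the original source): prefetch issues
 * the reads asynchronously, so a scan over many blocks can overlap I/O
 * with processing.  "c", "first" and "count" are assumed here.
 *
 *	dm_bufio_prefetch(c, first, count);
 *	for (i = 0; i < count; i++) {
 *		struct dm_buffer *bp;
 *		void *data = dm_bufio_read(c, first + i, &bp);
 *		if (IS_ERR(data))
 *			continue;	(handle the read error as needed)
 *		(... process data ...)
 *		dm_bufio_release(bp);
 *	}
 */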

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously, so we just
		 * short-circuit it.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
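
/*
 * Illustrative commit sequence (not part of the original source): a
 * metadata client usually dirties a set of buffers and then commits them
 * with a single call, which writes everything out, waits, and flushes
 * the disk cache.  "c" is assumed.
 *
 *	int r = dm_bufio_write_dirty_buffers(c);
 *	if (r)
 *		return r;	(first async write error, or flush error)
 */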

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the
 * buffer at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
			   (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			print_stack_trace(&b->stack_trace, 1);
			b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO pending or the client
 * is still using it.  Caller is expected to know buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}

static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   struct shrink_control *sc)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = nr_to_scan;
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, sc->gfp_mask))
				freed++;
			if (!--nr_to_scan || ((count - freed) <= retain_target))
				return;
			cond_resched();
		}
	}
}

static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct dm_bufio_client *c =
		container_of(shrinker, struct dm_bufio_client, shrinker);
	unsigned long r;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return !nr_to_scan ? 0 : -1;

	if (nr_to_scan)
		__scan(c, nr_to_scan, sc);

	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	if (r > INT_MAX)
		r = INT_MAX;

	dm_bufio_unlock(c);

	return r;
}

/*
 * Create the buffering interface.
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
				  __ffs(block_size) - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
				  PAGE_SHIFT - __ffs(block_size) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.shrink = shrink;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
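
/*
 * Illustrative client lifecycle (not part of the original source); the
 * block device "bdev" and the 4096-byte block size are assumptions:
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	(... use dm_bufio_read/dm_bufio_new on c, release all buffers ...)
 *	dm_bufio_client_destroy(c);
 */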

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
		kmem_cache_destroy(dm_bufio_caches[i]);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");