// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */

#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
	"A percentage of time allocated for hydrating regions");

/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;

/* dm-clone metadata modes */
enum clone_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL,		/* all metadata I/O fails */
};

struct hash_table_bucket;

struct clone {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;
	struct dm_dev *dest_dev;
	struct dm_dev *source_dev;

	unsigned long nr_regions;
	sector_t region_size;
	unsigned int region_shift;

	/*
	 * A metadata commit and the flush of the destination device happen
	 * in interleaved steps; serialize concurrent commits with this mutex
	 * (see commit_metadata()).
	 */
	struct mutex commit_lock;

	struct dm_clone_metadata *cmd;

	/* Region hydration hash table */
	struct hash_table_bucket *ht;

	atomic_t ios_in_flight;

	wait_queue_head_t hydration_stopped;

	mempool_t hydration_pool;

	unsigned long last_commit_jiffies;

	/*
	 * We defer incoming WRITE bios for regions that are not hydrated,
	 * until after these regions have been hydrated.
	 *
	 * Also, we defer REQ_FUA and REQ_PREFLUSH bios, until after the
	 * metadata have been committed.
	 */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_discard_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_flush_completions;

	/* Maximum number of regions being copied during background hydration. */
	unsigned int hydration_threshold;

	/* Number of regions to batch together during background hydration. */
	unsigned int hydration_batch_size;

	/* Which region to hydrate next */
	unsigned long hydration_offset;

	atomic_t hydrations_in_flight;

	/*
	 * Save a copy of the table line rather than reconstructing it for the
	 * status.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	struct dm_kcopyd_client *kcopyd_client;

	enum clone_metadata_mode mode;
	unsigned long flags;
};

/*
 * dm-clone flags
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
	return READ_ONCE(clone->mode);
}

static const char *clone_device_name(struct clone *clone)
{
	return dm_table_device_name(clone->ti->table);
}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
	static const char * const descs[] = {
		"read-write",
		"read-only",
		"fail"
	};

	enum clone_metadata_mode old_mode = get_clone_mode(clone);

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_clone_metadata_set_read_only(clone->cmd);
		break;

	case CM_WRITE:
		dm_clone_metadata_set_read_write(clone->cmd);
		break;
	}

	WRITE_ONCE(clone->mode, new_mode);

	if (new_mode != old_mode) {
		dm_table_event(clone->ti->table);
		DMINFO("%s: Switching to %s mode", clone_device_name(clone),
		       descs[(int)new_mode]);
	}
}

static void __abort_transaction(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) >= CM_READ_ONLY)
		return;

	DMERR("%s: Aborting current metadata transaction", dev_name);
	if (dm_clone_metadata_abort(clone->cmd)) {
		DMERR("%s: Failed to abort metadata transaction", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __reload_in_core_bitset(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) == CM_FAIL)
		return;

	/* Reload the on-disk bitset */
	DMINFO("%s: Reloading on-disk bitmap", dev_name);
	if (dm_clone_reload_in_core_bitset(clone->cmd)) {
		DMERR("%s: Failed to reload on-disk bitmap", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
	DMERR("%s: Metadata operation `%s' failed: error = %d",
	      clone_device_name(clone), op, r);

	__abort_transaction(clone);
	__set_clone_mode(clone, CM_READ_ONLY);

	/*
	 * dm_clone_reload_in_core_bitset() may run concurrently with either
	 * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but
	 * it's safe as we have already set the metadata to read-only mode.
	 */
	__reload_in_core_bitset(clone);
}

/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
	wake_up_all(&clone->hydration_stopped);
}

static inline void wake_worker(struct clone *clone)
{
	queue_work(clone->wq, &clone->worker);
}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->source_dev->bdev);
}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->dest_dev->bdev);
}

static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_clone_changed_this_transaction(clone->cmd);
}

/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
	return ((sector_t)region_nr << clone->region_shift);
}
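
/*
 * Illustration (added commentary, not from the original source): with a
 * region size of 8 sectors (4KB), region_shift is 3, so region 4 starts at
 * sector 4 << 3 = 32, and a bio at sector 35 maps to region 35 >> 3 = 4.
 */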

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
	return (bio->bi_iter.bi_sector >> clone->region_shift);
}

/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *nr_regions)
{
	unsigned long end;

	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
	end = bio_end_sector(bio) >> clone->region_shift;

	if (*rs >= end)
		*nr_regions = 0;
	else
		*nr_regions = end - *rs;
}
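
/*
 * Worked example (added commentary, not from the original source): with
 * 8-sector regions, a bio covering sectors [10, 50) rounds its start up to
 * region 2 (sector 16) and its end down to region 6 (sector 48), so only the
 * fully covered regions [2, 6) are reported. Partially covered regions at
 * either edge are deliberately excluded.
 */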

/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}

static void fail_bios(struct bio_list *bios, blk_status_t status)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void submit_bios(struct bio_list *bios)
{
	struct bio *bio;
	struct blk_plug plug;

	blk_start_plug(&plug);

	while ((bio = bio_list_pop(bios)))
		submit_bio_noacct(bio);

	blk_finish_plug(&plug);
}
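
/*
 * Note (added commentary): the blk_start_plug()/blk_finish_plug() pair above
 * lets the block layer coalesce the popped bios and dispatch them together,
 * instead of submitting each one to the underlying device individually.
 */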

/*---------------------------------------------------------------------------*/

/*
 * Submit a bio to the underlying device, unless it triggers a metadata
 * commit, in which case we defer it, so that the commit can be batched with
 * other commit-triggering bios in process_deferred_flush_bios().
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
	if (!bio_triggers_commit(clone, bio)) {
		submit_bio_noacct(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or Fail we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_flush_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*
 * Remap bio to the destination device and submit it, deferring it if the
 * metadata must be committed first.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
	remap_to_dest(clone, bio);
	issue_bio(clone, bio);
}

/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to
 * call from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list flush_bios = BIO_EMPTY_LIST;
	struct bio_list normal_bios = BIO_EMPTY_LIST;

	if (bio_list_empty(bios))
		return;

	while ((bio = bio_list_pop(bios))) {
		if (bio_triggers_commit(clone, bio))
			bio_list_add(&flush_bios, bio);
		else
			bio_list_add(&normal_bios, bio);
	}

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&clone->deferred_bios, &normal_bios);
	bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	/*
	 * If the bio has the REQ_FUA flag set we must commit the metadata
	 * before signaling its completion.
	 *
	 * complete_overwrite_bio() is only called by hydration_complete(),
	 * after having successfully completed the overwrite bio. At that
	 * point the region is marked as hydrated in the in-core metadata,
	 * but this change might not have reached the disk yet. For a plain
	 * write it is enough that the data are on the destination device;
	 * for a FUA write we must also persist the metadata update before
	 * completing the bio.
	 */
	if (!(bio->bi_opf & REQ_FUA)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or Fail we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_completions, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
	unsigned long rs, nr_regions;

	/*
	 * If the destination device supports discards, remap and trim the
	 * discard bio and pass it down. Otherwise complete the bio
	 * immediately.
	 */
	if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
		remap_to_dest(clone, bio);
		bio_region_range(clone, bio, &rs, &nr_regions);
		trim_bio(bio, region_to_sector(clone, rs),
			 nr_regions << clone->region_shift);
		submit_bio_noacct(bio);
	} else
		bio_endio(bio);
}
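
/*
 * Illustration (added commentary, not from the original source): continuing
 * the bio_region_range() example above, a discard of sectors [10, 50) with
 * 8-sector regions gets trimmed to the fully covered regions only, i.e., to
 * sectors [16, 48), before being passed down to the destination device.
 */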

static void process_discard_bio(struct clone *clone, struct bio *bio)
{
	unsigned long rs, nr_regions;

	bio_region_range(clone, bio, &rs, &nr_regions);
	if (!nr_regions) {
		bio_endio(bio);
		return;
	}

	if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
		    (rs + nr_regions) > clone->nr_regions)) {
		DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
		      clone_device_name(clone), rs, nr_regions,
		      clone->nr_regions,
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio_sectors(bio));
		bio_endio(bio);
		return;
	}

	/*
	 * The covered regions are already hydrated, so we just need to pass
	 * down the discard.
	 */
	if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
		complete_discard_bio(clone, bio, true);
		return;
	}

	/*
	 * If the metadata mode is RO or Fail we won't be able to update the
	 * metadata for the regions covered by the discard, so we just ignore
	 * it.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_endio(bio);
		return;
	}

	/*
	 * Defer discard processing.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_discard_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
	struct clone *clone;
	unsigned long region_nr;

	struct bio *overwrite_bio;
	bio_end_io_t *overwrite_bio_end_io;

	struct bio_list deferred_bios;

	blk_status_t status;

	/* Used in hydration batching */
	struct list_head list;

	/* Used in hydration hash table */
	struct hlist_node h;
};

/*
 * Hydration hash table.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Until this is the case, we have
 * to use spin locks. When list_bl is able to support IRQ safe versions, we can
 * switch to it.
 */
struct hash_table_bucket {
	struct hlist_head head;

	/* Spinlock protecting the bucket */
	spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
	spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
	spin_unlock_irqrestore(&(bucket)->lock, flags)

#define bucket_lock_irq(bucket) \
	spin_lock_irq(&(bucket)->lock)

#define bucket_unlock_irq(bucket) \
	spin_unlock_irq(&(bucket)->lock)

static int hash_table_init(struct clone *clone)
{
	unsigned int i, sz;
	struct hash_table_bucket *bucket;

	sz = 1 << HASH_TABLE_BITS;

	clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
	if (!clone->ht)
		return -ENOMEM;

	for (i = 0; i < sz; i++) {
		bucket = clone->ht + i;

		INIT_HLIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	return 0;
}

static void hash_table_exit(struct clone *clone)
{
	kvfree(clone->ht);
}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
						       unsigned long region_nr)
{
	return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{
	struct dm_clone_region_hydration *hd;

	hlist_for_each_entry(hd, &bucket->head, h) {
		if (hd->region_nr == region_nr)
			return hd;
	}

	return NULL;
}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{
	hlist_add_head(&hd->h, &bucket->head);
}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{
	struct dm_clone_region_hydration *hd2;

	hd2 = __hash_find(bucket, hd->region_nr);
	if (hd2)
		return hd2;

	__insert_region_hydration(bucket, hd);

	return hd;
}

/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
	struct dm_clone_region_hydration *hd;

	/*
	 * Allocate a hydration from the hydration mempool.
	 * This might block but it can't fail.
	 */
	hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
	hd->clone = clone;

	return hd;
}

static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
	mempool_free(hd, &hd->clone->hydration_pool);
}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
	hd->region_nr = region_nr;
	hd->overwrite_bio = NULL;
	bio_list_init(&hd->deferred_bios);
	hd->status = 0;

	INIT_LIST_HEAD(&hd->list);
	INIT_HLIST_NODE(&hd->h);
}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
	int r = 0;
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct clone *clone = hd->clone;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		r = -EPERM;

	/* Update the metadata */
	if (likely(!r) && hd->status == BLK_STS_OK)
		r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

	bucket = get_hash_table_bucket(clone, hd->region_nr);

	/* Remove hydration from hash table */
	bucket_lock_irqsave(bucket, flags);
	hlist_del(&hd->h);
	bucket_unlock_irqrestore(bucket, flags);

	return r;
}

/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
	int r;
	blk_status_t status;
	struct clone *clone = hd->clone;

	r = hydration_update_metadata(hd);

	if (hd->status == BLK_STS_OK && likely(!r)) {
		if (hd->overwrite_bio)
			complete_overwrite_bio(clone, hd->overwrite_bio);

		issue_deferred_bios(clone, &hd->deferred_bios);
	} else {
		status = r ? BLK_STS_IOERR : hd->status;

		if (hd->overwrite_bio)
			bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

		fail_bios(&hd->deferred_bios, status);
	}

	free_hydration(hd);

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
	blk_status_t status;

	struct dm_clone_region_hydration *tmp, *hd = context;
	struct clone *clone = hd->clone;

	LIST_HEAD(batched_hydrations);

	if (read_err || write_err) {
		DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
		status = BLK_STS_IOERR;
	} else {
		status = BLK_STS_OK;
	}
	list_splice_tail(&hd->list, &batched_hydrations);

	hd->status = status;
	hydration_complete(hd);

	/* Complete the remaining batched hydrations */
	list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
		hd->status = status;
		hydration_complete(hd);
	}

	/* Continue background hydration, if there is no I/O in-flight */
	if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	    !atomic_read(&clone->ios_in_flight))
		wake_worker(clone);
}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
	unsigned long region_start, region_end;
	sector_t tail_size, region_size, total_size;
	struct dm_io_region from, to;
	struct clone *clone = hd->clone;

	if (WARN_ON(!nr_regions))
		return;

	region_size = clone->region_size;
	region_start = hd->region_nr;
	region_end = region_start + nr_regions - 1;

	total_size = region_to_sector(clone, nr_regions - 1);

	if (region_end == clone->nr_regions - 1) {
		/*
		 * The last region of the target might be smaller than
		 * region_size.
		 */
		tail_size = clone->ti->len & (region_size - 1);
		if (!tail_size)
			tail_size = region_size;
	} else {
		tail_size = region_size;
	}

	total_size += tail_size;

	from.bdev = clone->source_dev->bdev;
	from.sector = region_to_sector(clone, region_start);
	from.count = total_size;

	to.bdev = clone->dest_dev->bdev;
	to.sector = from.sector;
	to.count = from.count;

	/* Issue copy */
	atomic_add(nr_regions, &clone->hydrations_in_flight);
	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
		       hydration_kcopyd_callback, hd);
}
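
/*
 * Worked example (added commentary, not from the original source): with
 * 8-sector regions and a 20-sector target (3 regions, the last one partial),
 * copying the 2 regions starting at region 1 gives total_size =
 * (2 - 1) << 3 = 8 sectors for the full region, plus a tail of 20 & 7 = 4
 * sectors for the final, partial region -- 12 sectors in total, starting at
 * sector 1 << 3 = 8.
 */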

static void overwrite_endio(struct bio *bio)
{
	struct dm_clone_region_hydration *hd = bio->bi_private;

	bio->bi_end_io = hd->overwrite_bio_end_io;
	hd->status = bio->bi_status;

	hydration_complete(hd);
}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
	/*
	 * We don't need to save and restore bio->bi_private because device
	 * mapper core generates a new bio for us to use, with clean
	 * bi_private.
	 */
	hd->overwrite_bio = bio;
	hd->overwrite_bio_end_io = bio->bi_end_io;

	bio->bi_end_io = overwrite_endio;
	bio->bi_private = hd;

	atomic_inc(&hd->clone->hydrations_in_flight);
	submit_bio_noacct(bio);
}

/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. In case, by the time this
 * function is called, the region has finished hydrating it's submitted to the
 * destination device.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
	unsigned long region_nr;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd, *hd2;

	region_nr = bio_to_region(clone, bio);
	bucket = get_hash_table_bucket(clone, region_nr);

	bucket_lock_irq(bucket);

	hd = __hash_find(bucket, region_nr);
	if (hd) {
		/* Someone else is hydrating the region */
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		return;
	}

	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		/* The region has been hydrated */
		bucket_unlock_irq(bucket);
		issue_bio(clone, bio);
		return;
	}

	/*
	 * We must allocate a hydration descriptor and start the hydration of
	 * the corresponding region.
	 */
	bucket_unlock_irq(bucket);

	hd = alloc_hydration(clone);
	hydration_init(hd, region_nr);

	bucket_lock_irq(bucket);

	/* Check if the region has been hydrated in the meantime. */
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		issue_bio(clone, bio);
		return;
	}

	hd2 = __find_or_insert_region_hydration(bucket, hd);
	if (hd2 != hd) {
		/* Someone else started the region's hydration. */
		bio_list_add(&hd2->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		return;
	}

	/*
	 * If the metadata mode is RO or Fail then there is a pending metadata
	 * operation failure. We won't be able to update the region's
	 * hydration metadata, so remove the inserted hydration and fail the
	 * bio.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		hlist_del(&hd->h);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		bio_io_error(bio);
		return;
	}

	/*
	 * Start region hydration.
	 *
	 * If a bio overwrites a region, i.e., the bio is aligned to the
	 * region and its size is equal to region_size, there is no need to
	 * copy the region from the source to the destination device.
	 */
	if (is_overwrite_bio(clone, bio)) {
		bucket_unlock_irq(bucket);
		hydration_overwrite(hd, bio);
	} else {
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		hydration_copy(hd, 1);
	}
}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */

/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB,
 * which is good for small, random write performance (because of the
 * overwriting of un-hydrated regions), and at the same time issue big copy
 * requests to kcopyd to maximize copy bandwidth.
 *
 * If a region is already being hydrated, e.g., because of a write to it, we
 * don't batch it.
 */
struct batch_info {
	struct dm_clone_region_hydration *head;
	unsigned int nr_batched_regions;
};

static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{
	struct clone *clone = hd->clone;
	unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

	if (batch->head) {
		/* Try to extend the current batch */
		if (batch->nr_batched_regions < max_batch_size &&
		    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
			list_add_tail(&hd->list, &batch->head->list);
			batch->nr_batched_regions++;
			hd = NULL;
		}

		/* Check if we should issue the current batch */
		if (batch->nr_batched_regions >= max_batch_size || hd) {
			hydration_copy(batch->head, batch->nr_batched_regions);
			batch->head = NULL;
			batch->nr_batched_regions = 0;
		}
	}

	if (!hd)
		return;

	/* We treat max batch sizes of zero and one equivalently */
	if (max_batch_size <= 1) {
		hydration_copy(hd, 1);
		return;
	}

	/* Start a new batch */
	BUG_ON(!list_empty(&hd->list));
	batch->head = hd;
	batch->nr_batched_regions = 1;
}
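
/*
 * Walk-through (added commentary, not from the original source): with a
 * batch size of 3, feeding regions 7, 8, 9 and then 11 into
 * __batch_hydration() first accumulates regions 7-9 into one batch. Region 9
 * fills the batch, so regions 7-9 are copied with a single kcopyd request;
 * region 11 is not adjacent to the (now empty) batch, so it starts a new
 * batch of its own.
 */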

static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd;
	unsigned long nr_regions = clone->nr_regions;

	hd = alloc_hydration(clone);

	/* Try to find a region to hydrate. */
	do {
		offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
		if (offset == nr_regions)
			break;

		bucket = get_hash_table_bucket(clone, offset);
		bucket_lock_irq(bucket);

		if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
		    !__hash_find(bucket, offset)) {
			hydration_init(hd, offset);
			__insert_region_hydration(bucket, hd);
			bucket_unlock_irq(bucket);

			/* Batch hydration */
			__batch_hydration(batch, hd);

			return (offset + 1);
		}

		bucket_unlock_irq(bucket);

	} while (++offset < nr_regions);

	if (hd)
		free_hydration(hd);

	return offset;
}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
	unsigned int current_volume;
	unsigned long offset, nr_regions = clone->nr_regions;

	struct batch_info batch = {
		.head = NULL,
		.nr_batched_regions = 0,
	};

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		return;

	if (dm_clone_is_hydration_done(clone->cmd))
		return;

	/*
	 * Avoid race with device suspension.
	 */
	atomic_inc(&clone->hydrations_in_flight);

	/*
	 * Make sure atomic_inc() is ordered before test_bit(), otherwise we
	 * might race with clone_postsuspend() and start a region hydration
	 * after the target has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in
	 * clone_postsuspend().
	 */
	smp_mb__after_atomic();

	offset = clone->hydration_offset;
	while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
	       !atomic_read(&clone->ios_in_flight) &&
	       test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	       offset < nr_regions) {
		current_volume = atomic_read(&clone->hydrations_in_flight);
		current_volume += batch.nr_batched_regions;

		if (current_volume > READ_ONCE(clone->hydration_threshold))
			break;

		offset = __start_next_hydration(clone, offset, &batch);
	}

	if (batch.head)
		hydration_copy(batch.head, batch.nr_batched_regions);

	if (offset >= nr_regions)
		offset = 0;

	clone->hydration_offset = offset;

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

/*---------------------------------------------------------------------------*/

static bool need_commit_due_to_time(struct clone *clone)
{
	return !time_in_range(jiffies, clone->last_commit_jiffies,
			      clone->last_commit_jiffies + COMMIT_PERIOD);
}
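
/*
 * Note (added commentary): time_in_range() handles jiffies wrap-around, so
 * the check above stays correct even when the jiffies counter overflows; it
 * simply asks whether more than COMMIT_PERIOD (one second) has passed since
 * the last commit.
 */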

/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
	int r = 0;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	mutex_lock(&clone->commit_lock);

	if (!dm_clone_changed_this_transaction(clone->cmd))
		goto out;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		r = -EPERM;
		goto out;
	}

	r = dm_clone_metadata_pre_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
		goto out;
	}

	r = blkdev_issue_flush(clone->dest_dev->bdev);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "flush destination device", r);
		goto out;
	}

	if (dest_dev_flushed)
		*dest_dev_flushed = true;

	r = dm_clone_metadata_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
		goto out;
	}

	if (dm_clone_is_hydration_done(clone->cmd))
		dm_table_event(clone->ti->table);
out:
	mutex_unlock(&clone->commit_lock);

	return r;
}
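
/*
 * Note (added commentary): the ordering above is what makes the commit safe.
 * The destination device is flushed after the metadata pre-commit and before
 * the metadata commit, so a region is never marked as hydrated on disk
 * before its data have actually reached stable storage on the destination
 * device.
 */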

static void process_deferred_discards(struct clone *clone)
{
	int r = -EPERM;
	struct bio *bio;
	struct blk_plug plug;
	unsigned long rs, nr_regions;
	struct bio_list discards = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&discards, &clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_discard_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&discards))
		return;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		goto out;

	/* Update the metadata */
	bio_list_for_each(bio, &discards) {
		bio_region_range(clone, bio, &rs, &nr_regions);

		/*
		 * A discard request might cover regions that have been
		 * already hydrated. There is no need to update the metadata
		 * for these regions.
		 */
		r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
		if (unlikely(r))
			break;
	}
out:
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&discards)))
		complete_discard_bio(clone, bio, r == 0);
	blk_finish_plug(&plug);
}

static void process_deferred_bios(struct clone *clone)
{
	struct bio_list bios = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_bios);
	bio_list_init(&clone->deferred_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios))
		return;

	submit_bios(&bios);
}

static void process_deferred_flush_bios(struct clone *clone)
{
	struct bio *bio;
	bool dest_dev_flushed;
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio_list bio_completions = BIO_EMPTY_LIST;

	/*
	 * If there are any deferred flush bios, we must commit the metadata
	 * before issuing them or signaling their completion.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_bios);

	bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
	bio_list_init(&clone->deferred_flush_completions);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
		return;

	if (commit_metadata(clone, &dest_dev_flushed)) {
		bio_list_merge(&bios, &bio_completions);

		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);

		return;
	}

	clone->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bio_completions)))
		bio_endio(bio);

	while ((bio = bio_list_pop(&bios))) {
		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
			/*
			 * We just flushed the destination device as part of
			 * the metadata commit, so there is no reason to send
			 * another flush.
			 */
			bio_endio(bio);
		} else {
			submit_bio_noacct(bio);
		}
	}
}

static void do_worker(struct work_struct *work)
{
	struct clone *clone = container_of(work, typeof(*clone), worker);

	process_deferred_bios(clone);
	process_deferred_discards(clone);

	/*
	 * process_deferred_flush_bios():
	 *
	 *   - Commit metadata
	 *
	 *   - Process deferred REQ_FUA completions
	 *
	 *   - Process deferred REQ_PREFLUSH bios
	 */
	process_deferred_flush_bios(clone);

	/* Background hydration */
	do_hydration(clone);
}

/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{
	struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

	wake_worker(clone);
	queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}

/*---------------------------------------------------------------------------*/

/*
 * Target methods
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{
	struct clone *clone = ti->private;
	unsigned long region_nr;

	atomic_inc(&clone->ios_in_flight);

	if (unlikely(get_clone_mode(clone) == CM_FAIL))
		return DM_MAPIO_KILL;

	/*
	 * REQ_PREFLUSH bios carry no data:
	 *
	 * - Commit metadata, if changed
	 *
	 * - Pass down to destination device
	 */
	if (bio->bi_opf & REQ_PREFLUSH) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * dm-clone interprets discards and performs a fast hydration of the
	 * discarded regions, i.e., we skip the copy from the source device
	 * and just mark the regions as hydrated.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		process_discard_bio(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * If the bio's region is hydrated, redirect it to the destination
	 * device.
	 *
	 * If the region is not hydrated and the bio is a READ, redirect it to
	 * the source device.
	 *
	 * Else, defer WRITE bio until after its region has been hydrated and
	 * redirect it to the destination device.
	 */
	region_nr = bio_to_region(clone, bio);
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	} else if (bio_data_dir(bio) == READ) {
		remap_to_source(clone, bio);
		return DM_MAPIO_REMAPPED;
	}

	remap_to_dest(clone, bio);
	hydrate_bio_region(clone, bio);

	return DM_MAPIO_SUBMITTED;
}

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct clone *clone = ti->private;

	atomic_dec(&clone->ios_in_flight);

	return DM_ENDIO_DONE;
}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count;

	count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	DMEMIT("%u ", count);

	if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		DMEMIT("no_hydration ");

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}

static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count = 4;

	DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
	       READ_ONCE(clone->hydration_threshold),
	       READ_ONCE(clone->hydration_batch_size));

	*sz_ptr = sz;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{
	int r;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_metadata_blocks = 0;
	dm_block_t nr_metadata_blocks = 0;
	char buf[BDEVNAME_SIZE];
	struct clone *clone = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_clone_mode(clone) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_metadata(clone, NULL);

		r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
		       DM_CLONE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
		       (unsigned long long)nr_metadata_blocks,
		       (unsigned long long)clone->region_size,
		       dm_clone_nr_of_hydrated_regions(clone->cmd),
		       clone->nr_regions,
		       atomic_read(&clone->hydrations_in_flight));

		emit_flags(clone, result, maxlen, &sz);
		emit_core_args(clone, result, maxlen, &sz);

		switch (get_clone_mode(clone)) {
		case CM_WRITE:
			DMEMIT("rw");
			break;
		case CM_READ_ONLY:
			DMEMIT("ro");
			break;
		case CM_FAIL:
			DMEMIT("Fail");
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < clone->nr_ctr_args; i++)
			DMEMIT(" %s", clone->ctr_args[i]);
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return;

error:
	DMEMIT("Error");
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*---------------------------------------------------------------------------*/

/*
 * Construct a clone device mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	[<#feature args> [<feature arg>]* [<#core args> [key value]*]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *                  source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: E.g. no_hydration, no_discard_passdown
 *
 * #core arguments: An even number of core arguments
 * core arguments: Key/value pairs for tuning the core
 *		   E.g. 'hydration_threshold 256'
 */
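
/*
 * Example (added commentary; the device names and sizes below are made up
 * for illustration): a 1GiB clone with 8-sector (4KB) regions could be
 * created with something like
 *
 *	dmsetup create clone1 --table \
 *	  "0 2097152 clone /dev/sdc /dev/sdb /dev/sda 8 1 no_hydration"
 *
 * using /dev/sdc for the metadata, /dev/sdb as the destination and /dev/sda
 * as the read-only source.
 */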
static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 2,
		.error = "Invalid number of feature arguments"
	};

	/* No feature arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "no_hydration")) {
			__clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			__clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
		} else {
			ti->error = "Invalid feature argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	unsigned int value;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 4,
		.error = "Invalid number of core arguments"
	};

	/* Initialize core arguments */
	clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
	clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;

	/* No core arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	if (argc & 1) {
		ti->error = "Number of core arguments must be even";
		return -EINVAL;
	}

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc -= 2;

		if (!strcasecmp(arg_name, "hydration_threshold")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_threshold'";
				return -EINVAL;
			}
			clone->hydration_threshold = value;
		} else if (!strcasecmp(arg_name, "hydration_batch_size")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_batch_size'";
				return -EINVAL;
			}
			clone->hydration_batch_size = value;
		} else {
			ti->error = "Invalid core argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	unsigned int region_size;
	struct dm_arg arg;

	arg.min = MIN_REGION_SIZE;
	arg.max = MAX_REGION_SIZE;
	arg.error = "Invalid region size";

	r = dm_read_arg(&arg, as, &region_size, error);
	if (r)
		return r;

	/* Check region size is a power of 2 */
	if (!is_power_of_2(region_size)) {
		*error = "Region size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the region size against the device logical block size */
	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
		*error = "Region size is not a multiple of device logical block size";
		return -EINVAL;
	}

	clone->region_size = region_size;

	return 0;
}

static int validate_nr_regions(unsigned long n, char **error)
{
	/*
	 * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
	 * further to 2^31 regions.
	 */
	if (n > (1UL << 31)) {
		*error = "Too many regions. Consider increasing the region size";
		return -EINVAL;
	}

	return 0;
}

static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(clone->metadata_dev);
	if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t dest_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->dest_dev);
	if (r) {
		*error = "Error opening destination device";
		return r;
	}

	dest_dev_size = get_dev_size(clone->dest_dev);
	if (dest_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->dest_dev);
		*error = "Device size larger than destination device";
		return -EINVAL;
	}

	return 0;
}

static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t source_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
			  &clone->source_dev);
	if (r) {
		*error = "Error opening source device";
		return r;
	}

	source_dev_size = get_dev_size(clone->source_dev);
	if (source_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->source_dev);
		*error = "Device size larger than source device";
		return -EINVAL;
	}

	return 0;
}

static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
{
	unsigned int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		goto error;

	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);

		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			goto error;
		}
	}

	clone->nr_ctr_args = argc;
	clone->ctr_args = copy;
	return 0;

error:
	*error = "Failed to allocate memory for table line";
	return -ENOMEM;
}

static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	sector_t nr_regions;
	struct clone *clone;
	struct dm_arg_set as;

	if (argc < 4) {
		ti->error = "Invalid number of arguments";
		return -EINVAL;
	}

	as.argc = argc;
	as.argv = argv;

	clone = kzalloc(sizeof(*clone), GFP_KERNEL);
	if (!clone) {
		ti->error = "Failed to allocate clone structure";
		return -ENOMEM;
	}

	clone->ti = ti;

	/* Initialize dm-clone flags */
	__set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	__set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	__set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	r = parse_metadata_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_clone;

	r = parse_dest_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_meta_dev;

	r = parse_source_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_dest_dev;

	r = parse_region_size(clone, &as, &ti->error);
	if (r)
		goto out_with_source_dev;

	clone->region_shift = __ffs(clone->region_size);
	nr_regions = dm_sector_div_up(ti->len, clone->region_size);

	/* Check for overflow */
	if (nr_regions != (unsigned long)nr_regions) {
		ti->error = "Too many regions. Consider increasing the region size";
		r = -EOVERFLOW;
		goto out_with_source_dev;
	}

	clone->nr_regions = nr_regions;

	r = validate_nr_regions(clone->nr_regions, &ti->error);
	if (r)
		goto out_with_source_dev;

	r = dm_set_target_max_io_len(ti, clone->region_size);
	if (r) {
		ti->error = "Failed to set max io len";
		goto out_with_source_dev;
	}

	r = parse_feature_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	r = parse_core_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	/* Load metadata */
	clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
					    clone->region_size);
	if (IS_ERR(clone->cmd)) {
		ti->error = "Failed to load metadata";
		r = PTR_ERR(clone->cmd);
		goto out_with_source_dev;
	}

	__set_clone_mode(clone, CM_WRITE);

	if (get_clone_mode(clone) != CM_WRITE) {
		ti->error = "Unable to get write access to metadata, please check/repair metadata";
		r = -EPERM;
		goto out_with_metadata;
	}

	clone->last_commit_jiffies = jiffies;

	/* Allocate hydration hash table */
	r = hash_table_init(clone);
	if (r) {
		ti->error = "Failed to allocate hydration hash table";
		goto out_with_metadata;
	}

	atomic_set(&clone->ios_in_flight, 0);
	init_waitqueue_head(&clone->hydration_stopped);
	spin_lock_init(&clone->lock);
	bio_list_init(&clone->deferred_bios);
	bio_list_init(&clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_completions);
	clone->hydration_offset = 0;
	atomic_set(&clone->hydrations_in_flight, 0);

	clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!clone->wq) {
		ti->error = "Failed to allocate workqueue";
		r = -ENOMEM;
		goto out_with_ht;
	}

	INIT_WORK(&clone->worker, do_worker);
	INIT_DELAYED_WORK(&clone->waker, do_waker);

	clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(clone->kcopyd_client)) {
		r = PTR_ERR(clone->kcopyd_client);
		goto out_with_wq;
	}

	r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
				   _hydration_cache);
	if (r) {
		ti->error = "Failed to create dm_clone_region_hydration memory pool";
		goto out_with_kcopyd;
	}

	/* Save a copy of the table line */
	r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
	if (r)
		goto out_with_mempool;

	mutex_init(&clone->commit_lock);

	/* Enable flushes */
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	/* Enable discards */
	ti->discards_supported = true;
	ti->num_discard_bios = 1;

	ti->private = clone;

	return 0;

out_with_mempool:
	mempool_exit(&clone->hydration_pool);
out_with_kcopyd:
	dm_kcopyd_client_destroy(clone->kcopyd_client);
out_with_wq:
	destroy_workqueue(clone->wq);
out_with_ht:
	hash_table_exit(clone);
out_with_metadata:
	dm_clone_metadata_close(clone->cmd);
out_with_source_dev:
	dm_put_device(ti, clone->source_dev);
out_with_dest_dev:
	dm_put_device(ti, clone->dest_dev);
out_with_meta_dev:
	dm_put_device(ti, clone->metadata_dev);
out_with_clone:
	kfree(clone);

	return r;
}

static void clone_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct clone *clone = ti->private;

	mutex_destroy(&clone->commit_lock);

	for (i = 0; i < clone->nr_ctr_args; i++)
		kfree(clone->ctr_args[i]);
	kfree(clone->ctr_args);

	mempool_exit(&clone->hydration_pool);
	dm_kcopyd_client_destroy(clone->kcopyd_client);
	destroy_workqueue(clone->wq);
	hash_table_exit(clone);
	dm_clone_metadata_close(clone->cmd);
	dm_put_device(ti, clone->source_dev);
	dm_put_device(ti, clone->dest_dev);
	dm_put_device(ti, clone->metadata_dev);

	kfree(clone);
}
1971
1972
1973
1974static void clone_postsuspend(struct dm_target *ti)
1975{
1976 struct clone *clone = ti->private;
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993 cancel_delayed_work_sync(&clone->waker);
1994
1995 set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
1996
1997
1998
1999
2000
2001
2002
2003
2004 smp_mb__after_atomic();
2005
2006 wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
2007 flush_workqueue(clone->wq);
2008
2009 (void) commit_metadata(clone, NULL);
2010}

static void clone_resume(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	do_waker(&clone->waker.work);
}

static bool bdev_supports_discards(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return (q && blk_queue_discard(q));
}

/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		return;

	if (!bdev_supports_discards(dest_dev))
		reason = "discard unsupported";
	else if (dest_limits->max_discard_sectors < clone->region_size)
		reason = "max discard sectors smaller than a region";

	if (reason) {
		DMWARN("Destination device (%s) %s: Disabling discard passdown.",
		       bdevname(dest_dev, buf), reason);
		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
	}
}

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/* No passdown is done so we set our own virtual limits */
		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
		limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
		return;
	}

	/*
	 * clone_iterate_devices() is stacking both the source and destination
	 * device limits but discards aren't passed to the source device, so
	 * inherit destination's limits.
	 */
	limits->max_discard_sectors = dest_limits->max_discard_sectors;
	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
	limits->discard_granularity = dest_limits->discard_granularity;
	limits->discard_alignment = dest_limits->discard_alignment;
	limits->discard_misaligned = dest_limits->discard_misaligned;
	limits->max_discard_segments = dest_limits->max_discard_segments;
}

static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct clone *clone = ti->private;
	u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with
	 * dm-clone's region size (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < clone->region_size ||
	    do_div(io_opt_sectors, clone->region_size)) {
		blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT);
		blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT);
	}

	disable_passdown_if_not_supported(clone);
	set_discard_limits(clone, limits);
}

static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int ret;
	struct clone *clone = ti->private;
	struct dm_dev *dest_dev = clone->dest_dev;
	struct dm_dev *source_dev = clone->source_dev;

	ret = fn(ti, source_dev, 0, ti->len, data);
	if (!ret)
		ret = fn(ti, dest_dev, 0, ti->len, data);
	return ret;
}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone message functions.
 */
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_threshold, nr_regions);

	/*
	 * If the user raised the hydration threshold, wake up the worker to
	 * restart the background hydration, in case it had stopped at the
	 * previous threshold.
	 */
	wake_worker(clone);
}

static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_batch_size, nr_regions);
}

static void enable_hydration(struct clone *clone)
{
	if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		wake_worker(clone);
}

static void disable_hydration(struct clone *clone)
{
	clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
}

static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct clone *clone = ti->private;
	unsigned int value;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "enable_hydration")) {
		enable_hydration(clone);
		return 0;
	}

	if (!strcasecmp(argv[0], "disable_hydration")) {
		disable_hydration(clone);
		return 0;
	}

	if (argc != 2)
		return -EINVAL;

	if (!strcasecmp(argv[0], "hydration_threshold")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_threshold(clone, value);

		return 0;
	}

	if (!strcasecmp(argv[0], "hydration_batch_size")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_batch_size(clone, value);

		return 0;
	}

	DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
	return -EINVAL;
}
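
/*
 * Example (added commentary; the device name below is made up): the message
 * handlers above are driven through the device mapper message interface,
 * e.g.
 *
 *	dmsetup message clone1 0 hydration_threshold 256
 *	dmsetup message clone1 0 enable_hydration
 */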

static struct target_type clone_target = {
	.name = "clone",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = clone_ctr,
	.dtr = clone_dtr,
	.map = clone_map,
	.end_io = clone_endio,
	.postsuspend = clone_postsuspend,
	.resume = clone_resume,
	.status = clone_status,
	.message = clone_message,
	.io_hints = clone_io_hints,
	.iterate_devices = clone_iterate_devices,
};

/*---------------------------------------------------------------------------*/

/* Module functions */
static int __init dm_clone_init(void)
{
	int r;

	_hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
	if (!_hydration_cache)
		return -ENOMEM;

	r = dm_register_target(&clone_target);
	if (r < 0) {
		DMERR("Failed to register clone target");
		kmem_cache_destroy(_hydration_cache);
		return r;
	}

	return 0;
}

static void __exit dm_clone_exit(void)
{
	dm_unregister_target(&clone_target);

	kmem_cache_destroy(_hydration_cache);
	_hydration_cache = NULL;
}

/* Module hooks */
module_init(dm_clone_init);
module_exit(dm_clone_exit);

MODULE_DESCRIPTION(DM_NAME " clone target");
MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
MODULE_LICENSE("GPL");