// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */

#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
	"A percentage of time allocated for hydrating regions");

/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;

/* dm-clone metadata modes */
enum clone_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL,		/* all metadata I/O fails */
};

struct hash_table_bucket;

struct clone {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;
	struct dm_dev *dest_dev;
	struct dm_dev *source_dev;

	unsigned long nr_regions;
	sector_t region_size;
	unsigned int region_shift;

	/*
	 * A metadata commit and the actions taken in case it fails should run
	 * as a single atomic step.
	 */
	struct mutex commit_lock;

	struct dm_clone_metadata *cmd;

	/*
	 * bio used to flush the destination device, before committing the
	 * metadata.
	 */
	struct bio flush_bio;

	/* Region hydration hash table */
	struct hash_table_bucket *ht;

	atomic_t ios_in_flight;

	wait_queue_head_t hydration_stopped;

	mempool_t hydration_pool;

	unsigned long last_commit_jiffies;

	/*
	 * We defer incoming WRITE bios for regions that are not hydrated,
	 * until after these regions have been hydrated.
	 *
	 * Also, we defer REQ_FUA and REQ_PREFLUSH bios, until after the
	 * metadata have been committed.
	 */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_discard_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_flush_completions;

	/* Maximum number of regions being copied during background hydration. */
	unsigned int hydration_threshold;

	/* Number of regions to batch together during background hydration */
	unsigned int hydration_batch_size;

	/* Which region to hydrate next */
	unsigned long hydration_offset;

	atomic_t hydrations_in_flight;

	/*
	 * Save a copy of the table line rather than reconstructing it for the
	 * status.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	struct dm_kcopyd_client *kcopyd_client;

	enum clone_metadata_mode mode;
	unsigned long flags;
};

/*
 * dm-clone flags
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
	return READ_ONCE(clone->mode);
}

static const char *clone_device_name(struct clone *clone)
{
	return dm_table_device_name(clone->ti->table);
}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
	const char *descs[] = {
		"read-write",
		"read-only",
		"fail"
	};

	enum clone_metadata_mode old_mode = get_clone_mode(clone);

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_clone_metadata_set_read_only(clone->cmd);
		break;

	case CM_WRITE:
		dm_clone_metadata_set_read_write(clone->cmd);
		break;
	}

	WRITE_ONCE(clone->mode, new_mode);

	if (new_mode != old_mode) {
		dm_table_event(clone->ti->table);
		DMINFO("%s: Switching to %s mode", clone_device_name(clone),
		       descs[(int)new_mode]);
	}
}

static void __abort_transaction(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) >= CM_READ_ONLY)
		return;

	DMERR("%s: Aborting current metadata transaction", dev_name);
	if (dm_clone_metadata_abort(clone->cmd)) {
		DMERR("%s: Failed to abort metadata transaction", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __reload_in_core_bitset(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) == CM_FAIL)
		return;

	/* Reload the on-disk bitset */
	DMINFO("%s: Reloading on-disk bitmap", dev_name);
	if (dm_clone_reload_in_core_bitset(clone->cmd)) {
		DMERR("%s: Failed to reload on-disk bitmap", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
	DMERR("%s: Metadata operation `%s' failed: error = %d",
	      clone_device_name(clone), op, r);

	__abort_transaction(clone);
	__set_clone_mode(clone, CM_READ_ONLY);

	/*
	 * dm_clone_reload_in_core_bitset() may run concurrently with either
	 * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but
	 * it's safe as we have already set the metadata in read-only mode and
	 * their failure causes the region state to remain the same.
	 */
	__reload_in_core_bitset(clone);
}

/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
	wake_up_all(&clone->hydration_stopped);
}

static inline void wake_worker(struct clone *clone)
{
	queue_work(clone->wq, &clone->worker);
}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->source_dev->bdev);
}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->dest_dev->bdev);
}

static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_clone_changed_this_transaction(clone->cmd);
}

/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
	return ((sector_t)region_nr << clone->region_shift);
}

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
	return (bio->bi_iter.bi_sector >> clone->region_shift);
}

/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *nr_regions)
{
	unsigned long end;

	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
	end = bio_end_sector(bio) >> clone->region_shift;

	if (*rs >= end)
		*nr_regions = 0;
	else
		*nr_regions = end - *rs;
}
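
/*
 * Worked example (illustrative, not from the original source): with a region
 * size of 8 sectors (region_shift == 3), a bio covering sectors [10, 40)
 * yields:
 *
 *	*rs = dm_sector_div_up(10, 8) = 2;
 *	end = 40 >> 3 = 5;
 *	*nr_regions = 5 - 2 = 3;
 *
 * i.e., only regions 2, 3 and 4 (sectors [16, 40)) are reported, because
 * bio_region_range() returns just the regions the bio covers *fully*. Callers
 * like process_discard_bio() rely on this to avoid touching partially covered
 * regions.
 */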

/* Check whether a bio overwrites a region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}

static void fail_bios(struct bio_list *bios, blk_status_t status)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void submit_bios(struct bio_list *bios)
{
	struct bio *bio;
	struct blk_plug plug;

	blk_start_plug(&plug);

	while ((bio = bio_list_pop(bios)))
		submit_bio_noacct(bio);

	blk_finish_plug(&plug);
}

/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
	if (!bio_triggers_commit(clone, bio)) {
		submit_bio_noacct(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_flush_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*
 * Remap bio to the destination device and issue it.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
	remap_to_dest(clone, bio);
	issue_bio(clone, bio);
}

/*
 * Issue bios that have been deferred until after their region has finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to
 * call from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list flush_bios = BIO_EMPTY_LIST;
	struct bio_list normal_bios = BIO_EMPTY_LIST;

	if (bio_list_empty(bios))
		return;

	while ((bio = bio_list_pop(bios))) {
		if (bio_triggers_commit(clone, bio))
			bio_list_add(&flush_bios, bio);
		else
			bio_list_add(&normal_bios, bio);
	}

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&clone->deferred_bios, &normal_bios);
	bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	/*
	 * If the bio doesn't have the REQ_FUA flag set we can complete it
	 * right away: complete_overwrite_bio() is only called by
	 * hydration_complete(), after the in-core metadata have been
	 * successfully updated, and the bio's data have already reached the
	 * destination device. If we crash before the metadata are committed,
	 * the region is simply re-hydrated.
	 *
	 * If REQ_FUA is set, however, the metadata must be committed before
	 * signaling the bio's completion, so defer it to the worker.
	 */
	if (!(bio->bi_opf & REQ_FUA)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_completions, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
	unsigned long rs, nr_regions;

	/*
	 * If the destination device supports discards, remap and trim the
	 * discard bio and pass it down. Otherwise complete the bio
	 * immediately.
	 */
	if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
		remap_to_dest(clone, bio);
		bio_region_range(clone, bio, &rs, &nr_regions);
		trim_bio(bio, region_to_sector(clone, rs),
			 nr_regions << clone->region_shift);
		submit_bio_noacct(bio);
	} else
		bio_endio(bio);
}
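
/*
 * Worked example (illustrative): continuing the bio_region_range() example
 * above, a discard of sectors [10, 40) with 8-sector regions gets trimmed to
 * start at region_to_sector(clone, 2) == 16 with a length of 3 << 3 == 24
 * sectors, i.e., the bio passed down to the destination device covers exactly
 * sectors [16, 40) -- the fully discarded regions only.
 */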

static void process_discard_bio(struct clone *clone, struct bio *bio)
{
	unsigned long rs, nr_regions;

	bio_region_range(clone, bio, &rs, &nr_regions);
	if (!nr_regions) {
		bio_endio(bio);
		return;
	}

	if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
		    (rs + nr_regions) > clone->nr_regions)) {
		DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
		      clone_device_name(clone), rs, nr_regions,
		      clone->nr_regions,
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio_sectors(bio));
		bio_endio(bio);
		return;
	}

	/*
	 * The covered regions are already hydrated, so we just need to pass
	 * down the discard.
	 */
	if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
		complete_discard_bio(clone, bio, true);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to update the
	 * metadata for the regions covered by the discard, so we just ignore
	 * it.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_endio(bio);
		return;
	}

	/*
	 * Defer discard processing.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_discard_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
	struct clone *clone;
	unsigned long region_nr;

	struct bio *overwrite_bio;
	bio_end_io_t *overwrite_bio_end_io;

	struct bio_list deferred_bios;

	blk_status_t status;

	/* Used by hydration batching */
	struct list_head list;

	/* Used by hydration hash table */
	struct hlist_node h;
};

/*
 * Hydration hash table.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl and bit
 * spin locks don't support IRQ safe versions. Since we have to take the lock
 * in both process and interrupt context, we are forced to use regular spin
 * locks; one per hash table bucket.
 */
struct hash_table_bucket {
	struct hlist_head head;

	/* Spinlock protecting the bucket */
	spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
	spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
	spin_unlock_irqrestore(&(bucket)->lock, flags)

#define bucket_lock_irq(bucket) \
	spin_lock_irq(&(bucket)->lock)

#define bucket_unlock_irq(bucket) \
	spin_unlock_irq(&(bucket)->lock)

static int hash_table_init(struct clone *clone)
{
	unsigned int i, sz;
	struct hash_table_bucket *bucket;

	sz = 1 << HASH_TABLE_BITS;

	clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
	if (!clone->ht)
		return -ENOMEM;

	for (i = 0; i < sz; i++) {
		bucket = clone->ht + i;

		INIT_HLIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	return 0;
}

static void hash_table_exit(struct clone *clone)
{
	kvfree(clone->ht);
}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
						       unsigned long region_nr)
{
	return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{
	struct dm_clone_region_hydration *hd;

	hlist_for_each_entry(hd, &bucket->head, h) {
		if (hd->region_nr == region_nr)
			return hd;
	}

	return NULL;
}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{
	hlist_add_head(&hd->h, &bucket->head);
}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for the region.
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{
	struct dm_clone_region_hydration *hd2;

	hd2 = __hash_find(bucket, hd->region_nr);
	if (hd2)
		return hd2;

	__insert_region_hydration(bucket, hd);

	return hd;
}

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
	struct dm_clone_region_hydration *hd;

	/*
	 * Allocate a hydration from the hydration mempool.
	 * This might block but it can't fail.
	 */
	hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
	hd->clone = clone;

	return hd;
}

static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
	mempool_free(hd, &hd->clone->hydration_pool);
}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
	hd->region_nr = region_nr;
	hd->overwrite_bio = NULL;
	bio_list_init(&hd->deferred_bios);
	hd->status = 0;

	INIT_LIST_HEAD(&hd->list);
	INIT_HLIST_NODE(&hd->h);
}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
	int r = 0;
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct clone *clone = hd->clone;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		r = -EPERM;

	/* Update the metadata */
	if (likely(!r) && hd->status == BLK_STS_OK)
		r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

	bucket = get_hash_table_bucket(clone, hd->region_nr);

	/* Remove hydration from hash table */
	bucket_lock_irqsave(bucket, flags);
	hlist_del(&hd->h);
	bucket_unlock_irqrestore(bucket, flags);

	return r;
}

/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
	int r;
	blk_status_t status;
	struct clone *clone = hd->clone;

	r = hydration_update_metadata(hd);

	if (hd->status == BLK_STS_OK && likely(!r)) {
		if (hd->overwrite_bio)
			complete_overwrite_bio(clone, hd->overwrite_bio);

		issue_deferred_bios(clone, &hd->deferred_bios);
	} else {
		status = r ? BLK_STS_IOERR : hd->status;

		if (hd->overwrite_bio)
			bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

		fail_bios(&hd->deferred_bios, status);
	}

	free_hydration(hd);

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
	blk_status_t status;

	struct dm_clone_region_hydration *tmp, *hd = context;
	struct clone *clone = hd->clone;

	LIST_HEAD(batched_hydrations);

	if (read_err || write_err) {
		DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
		status = BLK_STS_IOERR;
	} else {
		status = BLK_STS_OK;
	}
	list_splice_tail(&hd->list, &batched_hydrations);

	hd->status = status;
	hydration_complete(hd);

	/* Complete the remaining batched hydrations */
	list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
		hd->status = status;
		hydration_complete(hd);
	}

	/* Continue background hydration, if there is no I/O in-flight */
	if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	    !atomic_read(&clone->ios_in_flight))
		wake_worker(clone);
}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
	unsigned long region_start, region_end;
	sector_t tail_size, region_size, total_size;
	struct dm_io_region from, to;
	struct clone *clone = hd->clone;

	if (WARN_ON(!nr_regions))
		return;

	region_size = clone->region_size;
	region_start = hd->region_nr;
	region_end = region_start + nr_regions - 1;

	total_size = region_to_sector(clone, nr_regions - 1);

	if (region_end == clone->nr_regions - 1) {
		/*
		 * The last region of the target might be smaller than
		 * region_size.
		 */
		tail_size = clone->ti->len & (region_size - 1);
		if (!tail_size)
			tail_size = region_size;
	} else {
		tail_size = region_size;
	}

	total_size += tail_size;

	from.bdev = clone->source_dev->bdev;
	from.sector = region_to_sector(clone, region_start);
	from.count = total_size;

	to.bdev = clone->dest_dev->bdev;
	to.sector = from.sector;
	to.count = from.count;

	/* Issue the copy */
	atomic_add(nr_regions, &clone->hydrations_in_flight);
	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
		       hydration_kcopyd_callback, hd);
}
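
/*
 * Worked example (illustrative): for a target of ti->len == 100 sectors and
 * region_size == 8, the 13th and last region (region 12) covers only
 * 100 & 7 == 4 sectors. Copying regions 11..12 therefore issues a kcopyd
 * request of region_to_sector(clone, 1) + 4 == 12 sectors instead of a full
 * 16, so the copy never runs past the end of the target.
 */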

static void overwrite_endio(struct bio *bio)
{
	struct dm_clone_region_hydration *hd = bio->bi_private;

	bio->bi_end_io = hd->overwrite_bio_end_io;
	hd->status = bio->bi_status;

	hydration_complete(hd);
}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
	/*
	 * The overwrite bio covers the whole region, so there is no need to
	 * copy it from the source device: the region is hydrated by writing
	 * the bio's data to the destination device. The hydration completes
	 * from the bio's end io callback, overwrite_endio().
	 */
	hd->overwrite_bio = bio;
	hd->overwrite_bio_end_io = bio->bi_end_io;

	bio->bi_end_io = overwrite_endio;
	bio->bi_private = hd;

	atomic_inc(&hd->clone->hydrations_in_flight);
	submit_bio_noacct(bio);
}

/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios, which will get submitted to the destination
 * device upon hydration completion.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
	unsigned long region_nr;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd, *hd2;

	region_nr = bio_to_region(clone, bio);
	bucket = get_hash_table_bucket(clone, region_nr);

	bucket_lock_irq(bucket);

	hd = __hash_find(bucket, region_nr);
	if (hd) {
		/* Someone else is hydrating the region */
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		return;
	}

	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		/* The region has been hydrated */
		bucket_unlock_irq(bucket);
		issue_bio(clone, bio);
		return;
	}

	/*
	 * We must allocate a hydration descriptor and start the hydration of
	 * the corresponding region. Drop the lock first, as the allocation
	 * might block.
	 */
	bucket_unlock_irq(bucket);

	hd = alloc_hydration(clone);
	hydration_init(hd, region_nr);

	bucket_lock_irq(bucket);

	/* We might have raced with another hydration */
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		issue_bio(clone, bio);
		return;
	}

	hd2 = __find_or_insert_region_hydration(bucket, hd);
	if (hd2 != hd) {
		/* Someone else started the region's hydration */
		bio_list_add(&hd2->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to update the
	 * region's hydration status, so fail the hydration and the bio.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		hlist_del(&hd->h);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		bio_io_error(bio);
		return;
	}

	/*
	 * Start region hydration.
	 *
	 * If the bio overwrites the whole region there is no need to copy it
	 * from the source device: the region is hydrated by writing the bio's
	 * data directly to the destination device. Otherwise, defer the bio
	 * until the copy from the source device completes.
	 */
	if (is_overwrite_bio(clone, bio)) {
		bucket_unlock_irq(bucket);
		hydration_overwrite(hd, bio);
	} else {
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		hydration_copy(hd, 1);
	}
}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 */

/*
 * Batch region hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB,
 * which is good for small, random write performance (because of the
 * overwriting of un-hydrated regions), and at the same time issue big copy
 * requests to kcopyd to maximize copy bandwidth.
 */
struct batch_info {
	struct dm_clone_region_hydration *head;
	unsigned int nr_batched_regions;
};

static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{
	struct clone *clone = hd->clone;
	unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

	if (batch->head) {
		/* Try to extend the current batch */
		if (batch->nr_batched_regions < max_batch_size &&
		    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
			list_add_tail(&hd->list, &batch->head->list);
			batch->nr_batched_regions++;
			hd = NULL;
		}

		/* Check if we should issue the current batch */
		if (batch->nr_batched_regions >= max_batch_size || hd) {
			hydration_copy(batch->head, batch->nr_batched_regions);
			batch->head = NULL;
			batch->nr_batched_regions = 0;
		}
	}

	if (!hd)
		return;

	/* We treat max batch sizes of zero and one equivalently */
	if (max_batch_size <= 1) {
		hydration_copy(hd, 1);
		return;
	}

	/* Start a new batch */
	BUG_ON(!list_empty(&hd->list));
	batch->head = hd;
	batch->nr_batched_regions = 1;
}
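
/*
 * Illustrative trace (not from the original source), assuming
 * hydration_batch_size == 4: feeding __batch_hydration() hydrations for
 * regions 10, 11, 12 and then 20 results in a single 3-region kcopyd copy
 * for regions 10..12 (the batch is flushed when the non-adjacent region 20
 * arrives), after which region 20 starts a new batch of its own.
 */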

static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd;
	unsigned long nr_regions = clone->nr_regions;

	hd = alloc_hydration(clone);

	/* Try to find a region to hydrate. */
	do {
		offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
		if (offset == nr_regions)
			break;

		bucket = get_hash_table_bucket(clone, offset);
		bucket_lock_irq(bucket);

		if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
		    !__hash_find(bucket, offset)) {
			hydration_init(hd, offset);
			__insert_region_hydration(bucket, hd);
			bucket_unlock_irq(bucket);

			/* Batch hydration */
			__batch_hydration(batch, hd);

			return (offset + 1);
		}

		bucket_unlock_irq(bucket);

	} while (++offset < nr_regions);

	if (hd)
		free_hydration(hd);

	return offset;
}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
	unsigned int current_volume;
	unsigned long offset, nr_regions = clone->nr_regions;

	struct batch_info batch = {
		.head = NULL,
		.nr_batched_regions = 0,
	};

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		return;

	if (dm_clone_is_hydration_done(clone->cmd))
		return;

	/*
	 * Avoid race with clone_postsuspend().
	 */
	atomic_inc(&clone->hydrations_in_flight);

	/*
	 * Make sure atomic_inc() is ordered before the test_bit() on
	 * DM_CLONE_HYDRATION_SUSPENDED below, otherwise we might race with
	 * clone_postsuspend() and start a region hydration after the target
	 * has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in
	 * clone_postsuspend().
	 */
	smp_mb__after_atomic();

	offset = clone->hydration_offset;
	while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
	       !atomic_read(&clone->ios_in_flight) &&
	       test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	       offset < nr_regions) {
		current_volume = atomic_read(&clone->hydrations_in_flight);
		current_volume += batch.nr_batched_regions;

		if (current_volume > READ_ONCE(clone->hydration_threshold))
			break;

		offset = __start_next_hydration(clone, offset, &batch);
	}

	if (batch.head)
		hydration_copy(batch.head, batch.nr_batched_regions);

	if (offset >= nr_regions)
		offset = 0;

	clone->hydration_offset = offset;

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

/*---------------------------------------------------------------------------*/

static bool need_commit_due_to_time(struct clone *clone)
{
	return !time_in_range(jiffies, clone->last_commit_jiffies,
			      clone->last_commit_jiffies + COMMIT_PERIOD);
}
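
/*
 * Example (illustrative): with COMMIT_PERIOD == HZ, if HZ == 250 and the last
 * commit happened at jiffies == 1000, need_commit_due_to_time() returns false
 * until jiffies leaves the range [1000, 1250], i.e., time-based commits
 * happen at most once per second. Flushes and FUA writes can still force an
 * earlier commit via process_deferred_flush_bios().
 */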

/*
 * Commit the metadata, if any changes were made, after flushing the cache of
 * the destination device.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
	int r = 0;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	mutex_lock(&clone->commit_lock);

	if (!dm_clone_changed_this_transaction(clone->cmd))
		goto out;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		r = -EPERM;
		goto out;
	}

	r = dm_clone_metadata_pre_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
		goto out;
	}

	/* Flush the destination device before committing the metadata */
	bio_reset(&clone->flush_bio);
	bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
	clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	r = submit_bio_wait(&clone->flush_bio);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "flush destination device", r);
		goto out;
	}

	if (dest_dev_flushed)
		*dest_dev_flushed = true;

	r = dm_clone_metadata_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
		goto out;
	}

	if (dm_clone_is_hydration_done(clone->cmd))
		dm_table_event(clone->ti->table);
out:
	mutex_unlock(&clone->commit_lock);

	return r;
}

static void process_deferred_discards(struct clone *clone)
{
	int r = -EPERM;
	struct bio *bio;
	struct blk_plug plug;
	unsigned long rs, nr_regions;
	struct bio_list discards = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&discards, &clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_discard_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&discards))
		return;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		goto out;

	/* Update the metadata */
	bio_list_for_each(bio, &discards) {
		bio_region_range(clone, bio, &rs, &nr_regions);
		/*
		 * A discarded region doesn't need to be hydrated: mark the
		 * regions covered by the discard as hydrated, skipping any
		 * that are hydrated already.
		 */
		r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
		if (unlikely(r))
			break;
	}
out:
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&discards)))
		complete_discard_bio(clone, bio, r == 0);
	blk_finish_plug(&plug);
}

static void process_deferred_bios(struct clone *clone)
{
	struct bio_list bios = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_bios);
	bio_list_init(&clone->deferred_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios))
		return;

	submit_bios(&bios);
}

static void process_deferred_flush_bios(struct clone *clone)
{
	struct bio *bio;
	bool dest_dev_flushed;
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio_list bio_completions = BIO_EMPTY_LIST;

	/*
	 * If there are any deferred flush bios, we must commit the metadata
	 * before issuing them or signaling their completion.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_bios);

	bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
	bio_list_init(&clone->deferred_flush_completions);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
		return;

	if (commit_metadata(clone, &dest_dev_flushed)) {
		bio_list_merge(&bios, &bio_completions);

		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);

		return;
	}

	clone->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bio_completions)))
		bio_endio(bio);

	while ((bio = bio_list_pop(&bios))) {
		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
			/*
			 * We just flushed the destination device as part of
			 * the metadata commit, so there is no reason to send
			 * another flush.
			 */
			bio_endio(bio);
		} else {
			submit_bio_noacct(bio);
		}
	}
}

static void do_worker(struct work_struct *work)
{
	struct clone *clone = container_of(work, typeof(*clone), worker);

	process_deferred_bios(clone);
	process_deferred_discards(clone);

	/*
	 * process_deferred_flush_bios():
	 *
	 *   - Commit metadata
	 *
	 *   - Process deferred REQ_FUA completions
	 *
	 *   - Process deferred REQ_PREFLUSH bios
	 */
	process_deferred_flush_bios(clone);

	/* Background hydration */
	do_hydration(clone);
}

/*
 * Commit periodically so that not too much unwritten data builds up, and
 * re-arm the delayed work for the next period.
 */
static void do_waker(struct work_struct *work)
{
	struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

	wake_worker(clone);
	queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}

/*---------------------------------------------------------------------------*/

/*
 * Target methods
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{
	struct clone *clone = ti->private;
	unsigned long region_nr;

	atomic_inc(&clone->ios_in_flight);

	if (unlikely(get_clone_mode(clone) == CM_FAIL))
		return DM_MAPIO_KILL;

	/*
	 * REQ_PREFLUSH bios carry no data:
	 *
	 * - Commit metadata, if changed
	 *
	 * - Pass down to destination device
	 */
	if (bio->bi_opf & REQ_PREFLUSH) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * dm-clone interprets discards and performs a fast hydration of the
	 * discarded regions, i.e., we skip the copy from the source device,
	 * and just mark the regions as hydrated.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		process_discard_bio(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * If the bio's region is hydrated, redirect it to the destination
	 * device.
	 *
	 * If the region is not hydrated and the bio is a READ, redirect it to
	 * the source device.
	 *
	 * Else, defer WRITE bio until after its region has been hydrated and
	 * start the region's hydration immediately.
	 */
	region_nr = bio_to_region(clone, bio);
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	} else if (bio_data_dir(bio) == READ) {
		remap_to_source(clone, bio);
		return DM_MAPIO_REMAPPED;
	}

	remap_to_dest(clone, bio);
	hydrate_bio_region(clone, bio);

	return DM_MAPIO_SUBMITTED;
}
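
/*
 * Illustrative end-to-end example (device names hypothetical): suppose a 2KiB
 * (4-sector) write lands on an un-hydrated region while region_size is 8
 * sectors. clone_map() remaps the bio to the destination device and calls
 * hydrate_bio_region(), which, since the bio doesn't overwrite the whole
 * region, defers the bio and kicks off a kcopyd copy of the region from
 * /dev/source to /dev/dest. When the copy completes, hydration_complete()
 * updates the metadata and the worker resubmits the deferred bio, which now
 * targets the hydrated region on the destination device.
 */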

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct clone *clone = ti->private;

	atomic_dec(&clone->ios_in_flight);

	return DM_ENDIO_DONE;
}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count;

	count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	DMEMIT("%u ", count);

	if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		DMEMIT("no_hydration ");

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}

static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count = 4;

	DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
	       READ_ONCE(clone->hydration_threshold),
	       READ_ONCE(clone->hydration_batch_size));

	*sz_ptr = sz;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#features> <features>* <#core args> <core args>* <clone metadata mode>
 */
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{
	int r;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_metadata_blocks = 0;
	dm_block_t nr_metadata_blocks = 0;
	char buf[BDEVNAME_SIZE];
	struct clone *clone = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_clone_mode(clone) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_metadata(clone, NULL);

		r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
		       DM_CLONE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
		       (unsigned long long)nr_metadata_blocks,
		       (unsigned long long)clone->region_size,
		       dm_clone_nr_of_hydrated_regions(clone->cmd),
		       clone->nr_regions,
		       atomic_read(&clone->hydrations_in_flight));

		emit_flags(clone, result, maxlen, &sz);
		emit_core_args(clone, result, maxlen, &sz);

		switch (get_clone_mode(clone)) {
		case CM_WRITE:
			DMEMIT("rw");
			break;
		case CM_READ_ONLY:
			DMEMIT("ro");
			break;
		case CM_FAIL:
			DMEMIT("Fail");
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < clone->nr_ctr_args; i++)
			DMEMIT(" %s", clone->ctr_args[i]);
	}

	return;

error:
	DMEMIT("Error");
}
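
/*
 * Example STATUSTYPE_INFO output (hypothetical numbers, following the format
 * documented above clone_status()):
 *
 *	8 72/4096 8 1024/1310720 5 1 no_discard_passdown \
 *		4 hydration_threshold 1 hydration_batch_size 1 rw
 *
 * i.e., 72 of 4096 metadata blocks used, 8-sector regions, 1024 of 1310720
 * regions hydrated, 5 hydrations in flight, hydration enabled but discard
 * passdown disabled, default core arguments, metadata writable.
 */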

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}
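
/*
 * Example invocation of the constructor documented below (illustrative;
 * device names and sizes are placeholders):
 *
 *	dmsetup create clone --table "0 1048576000 clone /dev/meta /dev/dest \
 *		/dev/source 8 1 no_hydration"
 *
 * This creates a 1048576000-sector clone device with 8-sector (4KiB) regions
 * and background hydration initially disabled; hydration can be enabled later
 * with 'dmsetup message clone 0 enable_hydration'.
 */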

/*---------------------------------------------------------------------------*/

/*
 * Construct a clone mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	[<#feature args> [<feature arg>*] [<#core args> [<core arg>*]]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *                  source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: no_hydration or no_discard_passdown
 *
 * #core args: An even number of core arguments
 * core args: Key/value pairs for tuning the core
 *	      E.g. 'hydration_threshold 256'
 */
static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 2,
		.error = "Invalid number of feature arguments"
	};

	/* No feature arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "no_hydration")) {
			__clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			__clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
		} else {
			ti->error = "Invalid feature argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	unsigned int value;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 4,
		.error = "Invalid number of core arguments"
	};

	/* Initialize the core arguments to their defaults */
	clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
	clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;

	/* No core arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	if (argc & 1) {
		ti->error = "Number of core arguments must be even";
		return -EINVAL;
	}

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc -= 2;

		if (!strcasecmp(arg_name, "hydration_threshold")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_threshold'";
				return -EINVAL;
			}
			clone->hydration_threshold = value;
		} else if (!strcasecmp(arg_name, "hydration_batch_size")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_batch_size'";
				return -EINVAL;
			}
			clone->hydration_batch_size = value;
		} else {
			ti->error = "Invalid core argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	unsigned int region_size;
	struct dm_arg arg;

	arg.min = MIN_REGION_SIZE;
	arg.max = MAX_REGION_SIZE;
	arg.error = "Invalid region size";

	r = dm_read_arg(&arg, as, &region_size, error);
	if (r)
		return r;

	/* Check region size is a power of 2 */
	if (!is_power_of_2(region_size)) {
		*error = "Region size is not a power of 2";
		return -EINVAL;
	}

	/* Check region size is a multiple of the logical block size */
	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
		*error = "Region size is not a multiple of device logical block size";
		return -EINVAL;
	}

	clone->region_size = region_size;

	return 0;
}

static int validate_nr_regions(unsigned long n, char **error)
{
	/*
	 * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
	 * further to 2^31 regions.
	 */
	if (n > (1UL << 31)) {
		*error = "Too many regions. Consider increasing the region size";
		return -EINVAL;
	}

	return 0;
}
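
/*
 * Illustrative capacity check: with the minimum region size of 8 sectors
 * (4KiB), the 2^31 region limit allows targets of up to 2^31 * 4KiB == 8TiB;
 * doubling the region size doubles the maximum target size.
 */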

static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(clone->metadata_dev);
	if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t dest_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->dest_dev);
	if (r) {
		*error = "Error opening destination device";
		return r;
	}

	dest_dev_size = get_dev_size(clone->dest_dev);
	if (dest_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->dest_dev);
		*error = "Device size larger than destination device";
		return -EINVAL;
	}

	return 0;
}

static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t source_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
			  &clone->source_dev);
	if (r) {
		*error = "Error opening source device";
		return r;
	}

	source_dev_size = get_dev_size(clone->source_dev);
	if (source_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->source_dev);
		*error = "Device size larger than source device";
		return -EINVAL;
	}

	return 0;
}

static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
{
	unsigned int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		goto error;

	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);

		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			goto error;
		}
	}

	clone->nr_ctr_args = argc;
	clone->ctr_args = copy;
	return 0;

error:
	*error = "Failed to allocate memory for table line";
	return -ENOMEM;
}

static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	sector_t nr_regions;
	struct clone *clone;
	struct dm_arg_set as;

	if (argc < 4) {
		ti->error = "Invalid number of arguments";
		return -EINVAL;
	}

	as.argc = argc;
	as.argv = argv;

	clone = kzalloc(sizeof(*clone), GFP_KERNEL);
	if (!clone) {
		ti->error = "Failed to allocate clone structure";
		return -ENOMEM;
	}

	clone->ti = ti;

	/* Initialize dm-clone flags */
	__set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	__set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	__set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	r = parse_metadata_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_clone;

	r = parse_dest_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_meta_dev;

	r = parse_source_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_dest_dev;

	r = parse_region_size(clone, &as, &ti->error);
	if (r)
		goto out_with_source_dev;

	clone->region_shift = __ffs(clone->region_size);
	nr_regions = dm_sector_div_up(ti->len, clone->region_size);

	/* Check for overflow */
	if (nr_regions != (unsigned long)nr_regions) {
		ti->error = "Too many regions. Consider increasing the region size";
		r = -EOVERFLOW;
		goto out_with_source_dev;
	}

	clone->nr_regions = nr_regions;

	r = validate_nr_regions(clone->nr_regions, &ti->error);
	if (r)
		goto out_with_source_dev;

	r = dm_set_target_max_io_len(ti, clone->region_size);
	if (r) {
		ti->error = "Failed to set max io len";
		goto out_with_source_dev;
	}

	r = parse_feature_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	r = parse_core_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	/* Load metadata */
	clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
					    clone->region_size);
	if (IS_ERR(clone->cmd)) {
		ti->error = "Failed to load metadata";
		r = PTR_ERR(clone->cmd);
		goto out_with_source_dev;
	}

	__set_clone_mode(clone, CM_WRITE);

	if (get_clone_mode(clone) != CM_WRITE) {
		ti->error = "Unable to get write access to metadata, please check/repair metadata";
		r = -EPERM;
		goto out_with_metadata;
	}

	clone->last_commit_jiffies = jiffies;

	/* Allocate hydration hash table */
	r = hash_table_init(clone);
	if (r) {
		ti->error = "Failed to allocate hydration hash table";
		goto out_with_metadata;
	}

	atomic_set(&clone->ios_in_flight, 0);
	init_waitqueue_head(&clone->hydration_stopped);
	spin_lock_init(&clone->lock);
	bio_list_init(&clone->deferred_bios);
	bio_list_init(&clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_completions);
	clone->hydration_offset = 0;
	atomic_set(&clone->hydrations_in_flight, 0);
	bio_init(&clone->flush_bio, NULL, 0);

	clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!clone->wq) {
		ti->error = "Failed to allocate workqueue";
		r = -ENOMEM;
		goto out_with_ht;
	}

	INIT_WORK(&clone->worker, do_worker);
	INIT_DELAYED_WORK(&clone->waker, do_waker);

	clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(clone->kcopyd_client)) {
		r = PTR_ERR(clone->kcopyd_client);
		goto out_with_wq;
	}

	r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
				   _hydration_cache);
	if (r) {
		ti->error = "Failed to create dm_clone_region_hydration memory pool";
		goto out_with_kcopyd;
	}

	/* Save a copy of the table line */
	r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
	if (r)
		goto out_with_mempool;

	mutex_init(&clone->commit_lock);

	/* Enable flushes */
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	/* Enable discards */
	ti->discards_supported = true;
	ti->num_discard_bios = 1;

	ti->private = clone;

	return 0;

out_with_mempool:
	mempool_exit(&clone->hydration_pool);
out_with_kcopyd:
	dm_kcopyd_client_destroy(clone->kcopyd_client);
out_with_wq:
	destroy_workqueue(clone->wq);
out_with_ht:
	hash_table_exit(clone);
out_with_metadata:
	dm_clone_metadata_close(clone->cmd);
out_with_source_dev:
	dm_put_device(ti, clone->source_dev);
out_with_dest_dev:
	dm_put_device(ti, clone->dest_dev);
out_with_meta_dev:
	dm_put_device(ti, clone->metadata_dev);
out_with_clone:
	kfree(clone);

	return r;
}

static void clone_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct clone *clone = ti->private;

	mutex_destroy(&clone->commit_lock);
	bio_uninit(&clone->flush_bio);

	for (i = 0; i < clone->nr_ctr_args; i++)
		kfree(clone->ctr_args[i]);
	kfree(clone->ctr_args);

	mempool_exit(&clone->hydration_pool);
	dm_kcopyd_client_destroy(clone->kcopyd_client);
	destroy_workqueue(clone->wq);
	hash_table_exit(clone);
	dm_clone_metadata_close(clone->cmd);
	dm_put_device(ti, clone->source_dev);
	dm_put_device(ti, clone->dest_dev);
	dm_put_device(ti, clone->metadata_dev);

	kfree(clone);
}

/*---------------------------------------------------------------------------*/

static void clone_postsuspend(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	/*
	 * To successfully suspend the device:
	 *
	 *	- We cancel the delayed work for periodic commits and wait for
	 *	  it to finish.
	 *
	 *	- We stop the background hydration, i.e. we prevent new region
	 *	  hydrations from starting.
	 *
	 *	- We wait for any in-flight hydrations to finish.
	 *
	 *	- We flush the workqueue.
	 *
	 *	- We commit the metadata.
	 */
	cancel_delayed_work_sync(&clone->waker);

	set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);

	/*
	 * Make sure set_bit() is ordered before atomic_read(), otherwise we
	 * might race with do_hydration() and miss some started region
	 * hydrations.
	 *
	 * This is paired with the smp_mb__after_atomic() in do_hydration().
	 */
	smp_mb__after_atomic();

	wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
	flush_workqueue(clone->wq);

	(void) commit_metadata(clone, NULL);
}

static void clone_resume(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	do_waker(&clone->waker.work);
}

static bool bdev_supports_discards(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return (q && blk_queue_discard(q));
}

/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		return;

	if (!bdev_supports_discards(dest_dev))
		reason = "discard unsupported";
	else if (dest_limits->max_discard_sectors < clone->region_size)
		reason = "max discard sectors smaller than a region";

	if (reason) {
		DMWARN("Destination device (%s) %s: Disabling discard passdown.",
		       bdevname(dest_dev, buf), reason);
		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
	}
}

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/* No passdown is done so we set our own virtual limits */
		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
		limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
		return;
	}

	/*
	 * clone_iterate_devices() is stacking both the source and destination
	 * device limits but discards aren't passed to the source device, so
	 * we can use the destination device limits.
	 */
	limits->max_discard_sectors = dest_limits->max_discard_sectors;
	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
	limits->discard_granularity = dest_limits->discard_granularity;
	limits->discard_alignment = dest_limits->discard_alignment;
	limits->discard_misaligned = dest_limits->discard_misaligned;
	limits->max_discard_segments = dest_limits->max_discard_segments;
}

static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct clone *clone = ti->private;
	u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * clone's region size (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < clone->region_size ||
	    do_div(io_opt_sectors, clone->region_size)) {
		blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT);
		blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT);
	}

	disable_passdown_if_not_supported(clone);
	set_discard_limits(clone, limits);
}

static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int ret;
	struct clone *clone = ti->private;
	struct dm_dev *dest_dev = clone->dest_dev;
	struct dm_dev *source_dev = clone->source_dev;

	ret = fn(ti, source_dev, 0, ti->len, data);
	if (!ret)
		ret = fn(ti, dest_dev, 0, ti->len, data);
	return ret;
}

/*---------------------------------------------------------------------------*/

/* dm-clone message functions */
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_threshold, nr_regions);

	/*
	 * If user space sets hydration_threshold to zero then the hydration
	 * will stop. If at a later time the hydration threshold is increased
	 * we must restart the hydration process by waking up the worker.
	 */
	wake_worker(clone);
}

static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_batch_size, nr_regions);
}

static void enable_hydration(struct clone *clone)
{
	if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		wake_worker(clone);
}

static void disable_hydration(struct clone *clone)
{
	clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
}

static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct clone *clone = ti->private;
	unsigned int value;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "enable_hydration")) {
		enable_hydration(clone);
		return 0;
	}

	if (!strcasecmp(argv[0], "disable_hydration")) {
		disable_hydration(clone);
		return 0;
	}

	if (argc != 2)
		return -EINVAL;

	if (!strcasecmp(argv[0], "hydration_threshold")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_threshold(clone, value);

		return 0;
	}

	if (!strcasecmp(argv[0], "hydration_batch_size")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_batch_size(clone, value);

		return 0;
	}

	DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
	return -EINVAL;
}
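
/*
 * Example message usage (illustrative; 'clone' is a placeholder device name):
 *
 *	dmsetup message clone 0 hydration_threshold 256
 *	dmsetup message clone 0 hydration_batch_size 64
 *	dmsetup message clone 0 disable_hydration
 *
 * The key/value messages take effect immediately via WRITE_ONCE() and, for
 * the threshold, a worker wake-up; no suspend/resume cycle is needed.
 */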

static struct target_type clone_target = {
	.name = "clone",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = clone_ctr,
	.dtr = clone_dtr,
	.map = clone_map,
	.end_io = clone_endio,
	.postsuspend = clone_postsuspend,
	.resume = clone_resume,
	.status = clone_status,
	.message = clone_message,
	.io_hints = clone_io_hints,
	.iterate_devices = clone_iterate_devices,
};

/*---------------------------------------------------------------------------*/

/* Module functions */
static int __init dm_clone_init(void)
{
	int r;

	_hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
	if (!_hydration_cache)
		return -ENOMEM;

	r = dm_register_target(&clone_target);
	if (r < 0) {
		DMERR("Failed to register clone target");
		/* Don't leak the hydration cache on failure */
		kmem_cache_destroy(_hydration_cache);
		return r;
	}

	return 0;
}

static void __exit dm_clone_exit(void)
{
	dm_unregister_target(&clone_target);

	kmem_cache_destroy(_hydration_cache);
	_hydration_cache = NULL;
}

/* Module hooks */
module_init(dm_clone_init);
module_exit(dm_clone_exit);

MODULE_DESCRIPTION(DM_NAME " clone target");
MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
MODULE_LICENSE("GPL");
2244