/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * metadata/data stored on disk with 4k size unit (a block) instead of
 * 512B - RAID_BLOCK_SIZE
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent
 * recovery from scanning a very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sectors */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE	4

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;
	u64 next_cp_seq;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet
					 * written to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* number of sectors that need to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;
	bool in_teardown;
};

/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity following it.
 * io units are written to the log disk with normal writes: we always flush
 * the log disk first and only then start moving data to the raid disks, so
 * there is no need to write an io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio start writing to log,
				 * this doesn't include r5l_log_endio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finish writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};

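/*
 * The log device is used as a ring of 512B sectors. device_size is rounded
 * down to a multiple of BLOCK_SECTORS, so wrapping needs at most one
 * subtraction.
 */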
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start -= log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

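/* an io_unit's state may only move forward */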
static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

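/*
 * Hand the stripes of an io_unit back to the raid5 state machine: their
 * data/parity is safe in the log, so they may now be written to the raid
 * disks.
 */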
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

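/*
 * When the log device has a volatile cache, io_units that finished their log
 * write are parked on io_end_ios until a cache flush makes them durable.
 */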
static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

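/*
 * Log write completion. Without a volatile cache the stripes can be released
 * to the raid disks immediately; otherwise the md thread is woken to issue
 * the cache flush first.
 */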
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush)
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);
}

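/*
 * Seal the current io_unit: record the final meta size, checksum the meta
 * block (seeded with the array UUID checksum) and submit the bio to the log.
 */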
static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	submit_bio(WRITE, io->current_bio);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio->bi_rw = WRITE;
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

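/*
 * Start a new io_unit. The io_unit itself is allocated with GFP_ATOMIC; on
 * failure the caller parks the stripe on no_mem_stripes and it is retried
 * once an io_unit is freed back to the pool.
 */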
static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

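/*
 * Make sure the current meta page has room for payload_size more bytes;
 * submit it and start a new io_unit if it does not.
 */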
static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

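/*
 * Append a data/parity payload descriptor to the meta block. payload->size is
 * in sectors: one page for data, one or two pages for parity depending on
 * whether the array has a Q disk (checksum2_valid).
 */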
static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

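/*
 * Add one page to the io_unit's bio. If the last reservation wrapped past the
 * end of the log, chain a fresh bio starting at the new log_start before
 * adding the page.
 */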
static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		struct bio *prev = io->current_bio;

		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, prev);

		submit_bio(WRITE, prev);
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

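/*
 * Log one stripe: a payload descriptor plus page for every data block marked
 * R5_Wantwrite, followed by the parity block(s). The caller has already
 * checked that all descriptors fit in a single meta block.
 */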
static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (sh->qd_idx >= 0) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	return 0;
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * page to stripe cache)
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	int write_disks = 0;
	int data_pages, parity_pages;
	int meta_size;
	int reserve;
	int i;
	int ret = 0;

	if (!log)
		return -EAGAIN;

	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
	/* Doesn't work with very big raid array */
	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
		return -EINVAL;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
	if (!r5l_has_free_space(log, reserve)) {
		spin_lock(&log->no_space_stripes_lock);
		list_add_tail(&sh->log_list, &log->no_space_stripes);
		spin_unlock(&log->no_space_stripes_lock);

		r5l_wake_reclaim(log, reserve);
	} else {
		ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}

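/* submit any pending, partially filled meta block to the log */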
void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;
	/*
	 * we flush the log disk cache first, then write stripe data to the
	 * raid disks. So if the bio is finished, the log disk cache is
	 * flushed already. Recovery guarantees we can recover the bio from
	 * the log disk, so we don't need to flush again.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_rw &= ~REQ_FLUSH;
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

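/*
 * Sectors at the log tail that are only covered by io_units whose stripes
 * have already been written back to the raid disks.
 */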
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	return r5l_ring_distance(log, log->last_checkpoint,
				 log->next_checkpoint);
}

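/*
 * Retry one stripe that previously failed to get an io_unit; called whenever
 * an io_unit is freed back to the pool.
 */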
static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	assert_spin_locked(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

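/*
 * Free io_units whose stripes have all reached the raid disks, advancing
 * next_checkpoint/next_cp_seq past them. Returns true if anything was freed.
 */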
static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;
		log->next_cp_seq = io->seq;

		list_del(&io->log_sibling);
		mempool_free(io, log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space)
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

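/*
 * The log device cache flush finished: everything on flushing_ios is durable,
 * so those io_units' stripes may start writing to the raid disks.
 */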
static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
		flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting to dispatch IO to raid.
 * There is one situation we want to avoid: a broken meta block in the middle
 * of the log means recovery can't find the meta blocks after it. If an
 * operation requires a meta block to be persistent in the log, every meta
 * block before it must be persistent in the log too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order. We only start
 * writing the stripes of an io_unit to the raid disks once every io_unit up
 * to and including it has its data/parity in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
	sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
	 * waits for this thread to finish. This thread waits for
	 * MD_CHANGE_PENDING to clear, which is supposed to be done in
	 * md_check_recovery(). md_check_recovery() tries to get
	 * reconfig_mutex. Since r5l_quiesce already holds the mutex,
	 * md_check_recovery() fails, so MD_CHANGE_PENDING never gets cleared.
	 * The in_teardown check works around this issue.
	 */
	if (!log->in_teardown) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			!test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
			log->in_teardown);
		/*
		 * r5l_quiesce could run after the in_teardown check and take
		 * the mutex first. The superblock might get updated twice.
		 */
		if (log->in_teardown)
			md_update_sb(mddev, 1);
	} else {
		WARN_ON(!mddev_is_locked(mddev));
		md_update_sb(mddev, 1);
	}

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}

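/*
 * Reclaim log space: wait until at least reclaim_target sectors of the log
 * tail belong to fully finished io_units, write the superblock to point at
 * the new tail, discard the freed range and restart stripes that stalled for
 * lack of space.
 */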
static void r5l_do_reclaim(struct r5l_log *log)
{
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	u64 next_cp_seq;

	spin_lock_irq(&log->io_list_lock);
	/*
	 * move proper io_units to the reclaim list. We should not change the
	 * order: reclaimable/unreclaimable io_units can be mixed in the list,
	 * and we shouldn't reuse the space of an unreclaimable io_unit.
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = log->next_checkpoint;
	next_cp_seq = log->next_cp_seq;
	spin_unlock_irq(&log->io_list_lock);

	/* reclaimable is a sector_t and thus unsigned; only zero needs a check */
	if (reclaimable == 0)
		return;

	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * super here, because the log area might be reused soon and we don't
	 * want to confuse recovery.
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	log->last_cp_seq = next_cp_seq;
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5l_do_reclaim(log);
}

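/*
 * Raise the reclaim target (in sectors) and kick the reclaim thread. The
 * cmpxchg loop only ever moves the target up; a caller asking for less than
 * the current target simply returns.
 */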
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space;

	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}

void r5l_quiesce(struct r5l_log *log, int state)
{
	struct mddev *mddev;
	if (!log || state == 2)
		return;
	if (state == 0) {
		log->in_teardown = 0;
		/*
		 * This is a special case for hotadd. In suspend, the array has
		 * no journal. In resume, the journal is initialized as well as
		 * the reclaim thread.
		 */
		if (log->reclaim_thread)
			return;
		log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
					log->rdev->mddev, "reclaim");
	} else if (state == 1) {
		/*
		 * at this point all stripes are finished, so io_units are at
		 * least in STRIPE_END state
		 */
		log->in_teardown = 1;
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		r5l_wake_reclaim(log, -1L);
		md_unregister_thread(&log->reclaim_thread);
		r5l_do_reclaim(log);
	}
}

bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log;
	bool ret;
	/* don't allow write if journal disk is missing */
	rcu_read_lock();
	log = rcu_dereference(conf->log);

	if (!log)
		ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		ret = test_bit(Faulty, &log->rdev->flags);
	rcu_read_unlock();
	return ret;
}

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
};

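/*
 * Read and validate the meta block at ctx->pos: magic, version, sequence
 * number, self-referencing position and checksum must all match, otherwise
 * the log ends here.
 */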
static int r5l_read_meta_block(struct r5l_log *log,
			       struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;

	if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
		return -EIO;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

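/*
 * Replay one stripe: read its data/parity pages back from the log, verify the
 * stored checksums and write the pages to the raid disks (and replacements).
 * A checksum mismatch abandons the stripe and ends recovery at this point.
 */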
static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx,
					 sector_t stripe_sect,
					 int *offset, sector_t *log_offset)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct stripe_head *sh;
	struct r5l_payload_data_parity *payload;
	int disk_index;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
	while (1) {
		payload = page_address(ctx->meta_page) + *offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			raid5_compute_sector(conf,
					     le64_to_cpu(payload->location), 0,
					     &disk_index, sh);

			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
			ctx->meta_total_blocks += BLOCK_SECTORS;
		} else {
			disk_index = sh->pd_idx;
			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

			if (sh->qd_idx >= 0) {
				disk_index = sh->qd_idx;
				sync_page_io(log->rdev,
					     r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
					     PAGE_SIZE, sh->dev[disk_index].page,
					     READ, false);
				sh->dev[disk_index].log_checksum =
					le32_to_cpu(payload->checksum[1]);
				set_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags);
			}
			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
		}

		*log_offset = r5l_ring_add(log, *log_offset,
					   le32_to_cpu(payload->size));
		*offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			break;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		void *addr;
		u32 checksum;

		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		addr = kmap_atomic(sh->dev[disk_index].page);
		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
		kunmap_atomic(addr);
		if (checksum != sh->dev[disk_index].log_checksum)
			goto error;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		struct md_rdev *rdev, *rrdev;

		if (!test_and_clear_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev)
			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev)
			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
	}
	raid5_release_stripe(sh);
	return 0;

error:
	for (disk_index = 0; disk_index < sh->disks; disk_index++)
		sh->dev[disk_index].flags = 0;
	raid5_release_stripe(sh);
	return -EINVAL;
}

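/*
 * Walk every payload descriptor of one meta block and replay each referenced
 * stripe.
 */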
static int r5l_recovery_flush_one_meta(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct r5l_payload_data_parity *payload;
	struct r5l_meta_block *mb;
	int offset;
	sector_t log_offset;
	sector_t stripe_sector;

	mb = page_address(ctx->meta_page);
	offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + offset;
		stripe_sector = raid5_compute_sector(conf,
				le64_to_cpu(payload->location), 0, &dd, NULL);
		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
						  &offset, &log_offset))
			return -EINVAL;
	}
	return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
				   struct r5l_recovery_ctx *ctx)
{
	while (1) {
		if (r5l_read_meta_block(log, ctx))
			return;
		if (r5l_recovery_flush_one_meta(log, ctx))
			return;
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}
}

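/*
 * Write a valid, payload-free meta block at pos with the given sequence
 * number, using FUA so it is durable before the superblock is updated.
 */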
static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;
	u32 crc;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	mb = page_address(page);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	mb->checksum = cpu_to_le32(crc);

	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct r5l_recovery_ctx ctx;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;
	ctx.meta_page = alloc_page(GFP_KERNEL);
	if (!ctx.meta_page)
		return -ENOMEM;

	r5l_recovery_flush_log(log, &ctx);
	__free_page(ctx.meta_page);

	/*
	 * we did a recovery. Now ctx.pos points to an invalid meta block. New
	 * log will start here. But we can't let the superblock point to the
	 * last valid meta block. The log might look like:
	 * | meta 1| meta 2| meta 3|
	 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
	 * superblock points to meta 1, we write a new valid meta 2n. If a
	 * crash happens again, new recovery will start from meta 1. Since
	 * meta 2n is valid now, recovery will think meta 3 is valid, which is
	 * wrong. The solution is to create a new meta at meta2's position
	 * with its seq == meta 1's seq + 10 and let the superblock point to
	 * meta2. The same recovery will not think meta 3 is a valid meta,
	 * because its seq doesn't match.
	 */
	if (ctx.seq > log->last_cp_seq + 1) {
		int ret;

		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
		if (ret)
			return ret;
		log->seq = ctx.seq + 11;
		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
		r5l_write_super(log, ctx.pos);
	} else {
		log->log_start = ctx.pos;
		log->seq = ctx.seq;
	}
	return 0;
}

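/*
 * Record the new log tail in the rdev and mark the superblock dirty; the md
 * core performs the actual superblock write.
 */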
static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

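/*
 * Load the log state pointed to by journal_tail and replay it. An invalid or
 * corrupt meta block at the tail means there is no usable log, so a fresh one
 * is created.
 */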
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		/*
		 * Make sure the super points to a correct address. The log
		 * might have data very soon. If the super doesn't have a
		 * correct log tail address, recovery can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	return r5l_recovery_log(log);
ioerr:
	__free_page(page);
	return ret;
}

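/*
 * Set up the r5l_log for a journal rdev: checksum seed from the array UUID,
 * io_unit slab/mempools and bioset, flush machinery, the reclaim thread, and
 * finally load and replay any existing log.
 */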
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;

	if (PAGE_SIZE != 4096)
		return -EINVAL;
	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
	if (!log->io_pool)
		goto io_pool;

	log->bs = bioset_create(R5L_POOL_SIZE, 0);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	if (r5l_load_log(log))
		goto error;

	rcu_assign_pointer(conf->log, log);
	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

error:
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	mempool_destroy(log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5l_log *log)
{
	md_unregister_thread(&log->reclaim_thread);
	mempool_destroy(log->meta_pool);
	bioset_free(log->bs);
	mempool_destroy(log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}