#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
#include "raid5-log.h"
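
/*
 * PPL (Partial Parity Log) is used to close the RAID5 write hole.  For every
 * stripe write, the xor of the data chunks that are not modified by the
 * write - the partial parity - is stored in a reserved area of the member
 * disk that holds the stripe's parity (rdev->ppl), before the new data and
 * parity are written to the array.  After an unclean shutdown, the parity of
 * each logged stripe can be recalculated as the xor of the partial parity
 * and the data blocks read back from the member disks, so a later disk
 * failure does not expose stale parity.  The log is distributed across the
 * member disks; no dedicated journal device is required.  PPL is typically
 * enabled at array creation time, e.g. with
 * "mdadm --create ... --consistency-policy=ppl".
 */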

#define PPL_SPACE_SIZE (128 * 1024)

struct ppl_conf {
	struct mddev *mddev;

	/* array of child logs, one for each raid disk */
	struct ppl_log *child_logs;
	int count;

	/* the logical block size used for data_sector in ppl_header_entry */
	int block_size;
	/* raid array identifier */
	u32 signature;
	/* current log write sequence number */
	atomic64_t seq;

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	struct bio_set flush_bs;

	/* used only for recovery */
	int recovered_entries;
	int mismatch_count;

	/* stripes to retry if failed to allocate io_unit */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;

	/* write hint applied to the log bios */
	unsigned short write_hint;
};

struct ppl_log {
	/* shared between all log instances */
	struct ppl_conf *ppl_conf;

	/* array member disk associated with this log instance */
	struct md_rdev *rdev;

	struct mutex io_mutex;
	/* current io_unit accepting new stripes */
	struct ppl_io_unit *current_io;

	spinlock_t io_list_lock;
	/* all io_units of this log */
	struct list_head io_list;

	/* next sector in the log space to write to */
	sector_t next_io_sector;
	/* max partial parity size per io_unit */
	unsigned int entry_space;
	/* use multiple PPLs rotated within the reserved space */
	bool use_multippl;
	/* the member disk has a volatile write-back cache */
	bool wb_cache_on;
	/* data disks to flush before an io_unit is considered safe */
	unsigned long disk_flush_bitmap;
};

#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
	struct ppl_log *log;

	/* for the ppl_header */
	struct page *header_page;

	/* number of entries in the ppl_header */
	unsigned int entries_count;
	/* total size of partial parity data in this io_unit */
	unsigned int pp_size;

	/* sequence number of this log write */
	u64 seq;
	/* list entry in ppl_log->io_list */
	struct list_head log_sibling;

	/* stripes added to this io_unit */
	struct list_head stripe_list;
	/* how many stripes are not yet written to the array */
	atomic_t pending_stripes;
	/* how many disk cache flushes are still in progress */
	atomic_t pending_flushes;

	/* true if the write to the log has been started */
	bool submitted;

	/* inline bio and bvecs for writing the header and partial parity */
	struct bio bio;
	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};

157struct dma_async_tx_descriptor *
158ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
159 struct dma_async_tx_descriptor *tx)
160{
161 int disks = sh->disks;
162 struct page **srcs = percpu->scribble;
163 int count = 0, pd_idx = sh->pd_idx, i;
164 struct async_submit_ctl submit;
165
166 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	/*
	 * Partial parity is the xor of the stripe data chunks that are not
	 * modified by this write.  It is calculated differently depending on
	 * how the data is updated:
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		/*
		 * rmw: ops_run_prexor5() has already xored the old data of
		 * the disks being written into the parity page, leaving
		 * exactly the partial parity, so use that page as the only
		 * source.
		 */
		srcs[count++] = sh->dev[pd_idx].page;
181 } else if (sh->reconstruct_state == reconstruct_state_drain_run) {
		/* rcw: xor the data of all disks that are not being written */
183 for (i = disks; i--;) {
184 struct r5dev *dev = &sh->dev[i];
185 if (test_bit(R5_UPTODATE, &dev->flags))
186 srcs[count++] = dev->page;
187 }
188 } else {
189 return tx;
190 }
191
192 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
193 NULL, sh, (void *) (srcs + sh->disks + 2));
194
195 if (count == 1)
196 tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
197 &submit);
198 else
199 tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
200 &submit);
201
202 return tx;
203}
204
205static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
206{
207 struct kmem_cache *kc = pool_data;
208 struct ppl_io_unit *io;
209
210 io = kmem_cache_alloc(kc, gfp_mask);
211 if (!io)
212 return NULL;
213
214 io->header_page = alloc_page(gfp_mask);
215 if (!io->header_page) {
216 kmem_cache_free(kc, io);
217 return NULL;
218 }
219
220 return io;
221}
222
223static void ppl_io_pool_free(void *element, void *pool_data)
224{
225 struct kmem_cache *kc = pool_data;
226 struct ppl_io_unit *io = element;
227
228 __free_page(io->header_page);
229 kmem_cache_free(kc, io);
230}
231
232static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
233 struct stripe_head *sh)
234{
235 struct ppl_conf *ppl_conf = log->ppl_conf;
236 struct ppl_io_unit *io;
237 struct ppl_header *pplhdr;
238 struct page *header_page;
239
240 io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
241 if (!io)
242 return NULL;
243
244 header_page = io->header_page;
245 memset(io, 0, sizeof(*io));
246 io->header_page = header_page;
247
248 io->log = log;
249 INIT_LIST_HEAD(&io->log_sibling);
250 INIT_LIST_HEAD(&io->stripe_list);
251 atomic_set(&io->pending_stripes, 0);
252 atomic_set(&io->pending_flushes, 0);
253 bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
254
255 pplhdr = page_address(io->header_page);
256 clear_page(pplhdr);
257 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
258 pplhdr->signature = cpu_to_le32(ppl_conf->signature);
259
260 io->seq = atomic64_add_return(1, &ppl_conf->seq);
261 pplhdr->generation = cpu_to_le64(io->seq);
262
263 return io;
264}
265
266static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
267{
268 struct ppl_io_unit *io = log->current_io;
269 struct ppl_header_entry *e = NULL;
270 struct ppl_header *pplhdr;
271 int i;
272 sector_t data_sector = 0;
273 int data_disks = 0;
274 struct r5conf *conf = sh->raid_conf;
275
276 pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);
277
	/* check if the current io_unit is full */
279 if (io && (io->pp_size == log->entry_space ||
280 io->entries_count == PPL_HDR_MAX_ENTRIES)) {
281 pr_debug("%s: add io_unit blocked by seq: %llu\n",
282 __func__, io->seq);
283 io = NULL;
284 }
285
	/* add a new io_unit if there is none or the current one is full */
287 if (!io) {
288 io = ppl_new_iounit(log, sh);
289 if (!io)
290 return -ENOMEM;
291 spin_lock_irq(&log->io_list_lock);
292 list_add_tail(&io->log_sibling, &log->io_list);
293 spin_unlock_irq(&log->io_list_lock);
294
295 log->current_io = io;
296 }
297
298 for (i = 0; i < sh->disks; i++) {
299 struct r5dev *dev = &sh->dev[i];
300
301 if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
302 if (!data_disks || dev->sector < data_sector)
303 data_sector = dev->sector;
304 data_disks++;
305 }
306 }
307 BUG_ON(!data_disks);
308
309 pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
310 io->seq, (unsigned long long)data_sector, data_disks);
311
312 pplhdr = page_address(io->header_page);
313
314 if (io->entries_count > 0) {
315 struct ppl_header_entry *last =
316 &pplhdr->entries[io->entries_count - 1];
317 struct stripe_head *sh_last = list_last_entry(
318 &io->stripe_list, struct stripe_head, log_list);
319 u64 data_sector_last = le64_to_cpu(last->data_sector);
320 u32 data_size_last = le32_to_cpu(last->data_size);
321
		/*
		 * Check if we can merge this stripe into the last entry: it
		 * must start right after the last logged stripe, write to the
		 * same chunk region and continue the entry's data without a
		 * gap.
		 */
327 if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
328 (data_sector >> ilog2(conf->chunk_sectors) ==
329 data_sector_last >> ilog2(conf->chunk_sectors)) &&
330 ((data_sector - data_sector_last) * data_disks ==
331 data_size_last >> 9))
332 e = last;
333 }
334
335 if (!e) {
336 e = &pplhdr->entries[io->entries_count++];
337 e->data_sector = cpu_to_le64(data_sector);
338 e->parity_disk = cpu_to_le32(sh->pd_idx);
339 e->checksum = cpu_to_le32(~0);
340 }
341
342 le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);
343
	/* don't write any partial parity if this is a full stripe write */
345 if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
346 le32_add_cpu(&e->pp_size, PAGE_SIZE);
347 io->pp_size += PAGE_SIZE;
348 e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
349 page_address(sh->ppl_page),
350 PAGE_SIZE));
351 }
352
353 list_add_tail(&sh->log_list, &io->stripe_list);
354 atomic_inc(&io->pending_stripes);
355 sh->ppl_io = io;
356
357 return 0;
358}
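
/*
 * Add a stripe to the PPL io_unit of the log associated with its parity
 * disk, so that the stripe's parity can be recovered after an unclean
 * shutdown.  Returns -EAGAIN if the stripe should be handled without PPL.
 */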
360int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
361{
362 struct ppl_conf *ppl_conf = conf->log_private;
363 struct ppl_io_unit *io = sh->ppl_io;
364 struct ppl_log *log;
365
366 if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
367 !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
368 !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
369 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
370 return -EAGAIN;
371 }
372
373 log = &ppl_conf->child_logs[sh->pd_idx];
374
375 mutex_lock(&log->io_mutex);
376
377 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
378 mutex_unlock(&log->io_mutex);
379 return -EAGAIN;
380 }
381
382 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
383 clear_bit(STRIPE_DELAYED, &sh->state);
384 atomic_inc(&sh->count);
385
386 if (ppl_log_stripe(log, sh)) {
387 spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
388 list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
389 spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
390 }
391
392 mutex_unlock(&log->io_mutex);
393
394 return 0;
395}
396
397static void ppl_log_endio(struct bio *bio)
398{
399 struct ppl_io_unit *io = bio->bi_private;
400 struct ppl_log *log = io->log;
401 struct ppl_conf *ppl_conf = log->ppl_conf;
402 struct stripe_head *sh, *next;
403
404 pr_debug("%s: seq: %llu\n", __func__, io->seq);
405
406 if (bio->bi_status)
407 md_error(ppl_conf->mddev, log->rdev);
408
409 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
410 list_del_init(&sh->log_list);
411
412 set_bit(STRIPE_HANDLE, &sh->state);
413 raid5_release_stripe(sh);
414 }
415}
416
417static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
418{
419 char b[BDEVNAME_SIZE];
420
421 pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
422 __func__, io->seq, bio->bi_iter.bi_size,
423 (unsigned long long)bio->bi_iter.bi_sector,
424 bio_devname(bio, b));
425
426 submit_bio(bio);
427}
428
429static void ppl_submit_iounit(struct ppl_io_unit *io)
430{
431 struct ppl_log *log = io->log;
432 struct ppl_conf *ppl_conf = log->ppl_conf;
433 struct ppl_header *pplhdr = page_address(io->header_page);
434 struct bio *bio = &io->bio;
435 struct stripe_head *sh;
436 int i;
437
438 bio->bi_private = io;
439
440 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
441 ppl_log_endio(bio);
442 return;
443 }
444
445 for (i = 0; i < io->entries_count; i++) {
446 struct ppl_header_entry *e = &pplhdr->entries[i];
447
448 pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
449 __func__, io->seq, i, le64_to_cpu(e->data_sector),
450 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));
451
452 e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
453 ilog2(ppl_conf->block_size >> 9));
454 e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
455 }
456
457 pplhdr->entries_count = cpu_to_le32(io->entries_count);
458 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
459
	/*
	 * Wrap around to the start of the PPL space if this io_unit does not
	 * fit in the remaining space.
	 */
461 if (log->use_multippl &&
462 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
463 (PPL_HEADER_SIZE + io->pp_size) >> 9)
464 log->next_io_sector = log->rdev->ppl.sector;
465
466
467 bio->bi_end_io = ppl_log_endio;
468 bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
469 bio_set_dev(bio, log->rdev->bdev);
470 bio->bi_iter.bi_sector = log->next_io_sector;
471 bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
472 bio->bi_write_hint = ppl_conf->write_hint;
473
474 pr_debug("%s: log->current_io_sector: %llu\n", __func__,
475 (unsigned long long)log->next_io_sector);
476
477 if (log->use_multippl)
478 log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
479
480 WARN_ON(log->disk_flush_bitmap != 0);
481
482 list_for_each_entry(sh, &io->stripe_list, log_list) {
483 for (i = 0; i < sh->disks; i++) {
484 struct r5dev *dev = &sh->dev[i];
485
486 if ((ppl_conf->child_logs[i].wb_cache_on) &&
487 (test_bit(R5_Wantwrite, &dev->flags))) {
488 set_bit(i, &log->disk_flush_bitmap);
489 }
490 }
491
		/* entries for full stripe writes have no partial parity */
493 if (test_bit(STRIPE_FULL_WRITE, &sh->state))
494 continue;
495
496 if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
497 struct bio *prev = bio;
498
499 bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
500 &ppl_conf->bs);
501 bio->bi_opf = prev->bi_opf;
502 bio->bi_write_hint = prev->bi_write_hint;
503 bio_copy_dev(bio, prev);
504 bio->bi_iter.bi_sector = bio_end_sector(prev);
505 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
506
507 bio_chain(bio, prev);
508 ppl_submit_iounit_bio(io, prev);
509 }
510 }
511
512 ppl_submit_iounit_bio(io, bio);
513}
514
515static void ppl_submit_current_io(struct ppl_log *log)
516{
517 struct ppl_io_unit *io;
518
519 spin_lock_irq(&log->io_list_lock);
520
521 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
522 log_sibling);
523 if (io && io->submitted)
524 io = NULL;
525
526 spin_unlock_irq(&log->io_list_lock);
527
528 if (io) {
529 io->submitted = true;
530
531 if (io == log->current_io)
532 log->current_io = NULL;
533
534 ppl_submit_iounit(io);
535 }
536}
537
538void ppl_write_stripe_run(struct r5conf *conf)
539{
540 struct ppl_conf *ppl_conf = conf->log_private;
541 struct ppl_log *log;
542 int i;
543
544 for (i = 0; i < ppl_conf->count; i++) {
545 log = &ppl_conf->child_logs[i];
546
547 mutex_lock(&log->io_mutex);
548 ppl_submit_current_io(log);
549 mutex_unlock(&log->io_mutex);
550 }
551}
552
553static void ppl_io_unit_finished(struct ppl_io_unit *io)
554{
555 struct ppl_log *log = io->log;
556 struct ppl_conf *ppl_conf = log->ppl_conf;
557 struct r5conf *conf = ppl_conf->mddev->private;
558 unsigned long flags;
559
560 pr_debug("%s: seq: %llu\n", __func__, io->seq);
561
562 local_irq_save(flags);
563
564 spin_lock(&log->io_list_lock);
565 list_del(&io->log_sibling);
566 spin_unlock(&log->io_list_lock);
567
568 mempool_free(io, &ppl_conf->io_pool);
569
570 spin_lock(&ppl_conf->no_mem_stripes_lock);
571 if (!list_empty(&ppl_conf->no_mem_stripes)) {
572 struct stripe_head *sh;
573
574 sh = list_first_entry(&ppl_conf->no_mem_stripes,
575 struct stripe_head, log_list);
576 list_del_init(&sh->log_list);
577 set_bit(STRIPE_HANDLE, &sh->state);
578 raid5_release_stripe(sh);
579 }
580 spin_unlock(&ppl_conf->no_mem_stripes_lock);
581
582 local_irq_restore(flags);
583
584 wake_up(&conf->wait_for_quiescent);
585}
586
587static void ppl_flush_endio(struct bio *bio)
588{
589 struct ppl_io_unit *io = bio->bi_private;
590 struct ppl_log *log = io->log;
591 struct ppl_conf *ppl_conf = log->ppl_conf;
592 struct r5conf *conf = ppl_conf->mddev->private;
593 char b[BDEVNAME_SIZE];
594
595 pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
596
597 if (bio->bi_status) {
598 struct md_rdev *rdev;
599
600 rcu_read_lock();
601 rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
602 if (rdev)
603 md_error(rdev->mddev, rdev);
604 rcu_read_unlock();
605 }
606
607 bio_put(bio);
608
609 if (atomic_dec_and_test(&io->pending_flushes)) {
610 ppl_io_unit_finished(io);
611 md_wakeup_thread(conf->mddev->thread);
612 }
613}
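
/*
 * Flush the write-back cache of every data disk written by this io_unit and
 * finish the io_unit once all flushes have completed.
 */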
615static void ppl_do_flush(struct ppl_io_unit *io)
616{
617 struct ppl_log *log = io->log;
618 struct ppl_conf *ppl_conf = log->ppl_conf;
619 struct r5conf *conf = ppl_conf->mddev->private;
620 int raid_disks = conf->raid_disks;
621 int flushed_disks = 0;
622 int i;
623
624 atomic_set(&io->pending_flushes, raid_disks);
625
626 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
627 struct md_rdev *rdev;
628 struct block_device *bdev = NULL;
629
630 rcu_read_lock();
631 rdev = rcu_dereference(conf->disks[i].rdev);
632 if (rdev && !test_bit(Faulty, &rdev->flags))
633 bdev = rdev->bdev;
634 rcu_read_unlock();
635
636 if (bdev) {
637 struct bio *bio;
638 char b[BDEVNAME_SIZE];
639
640 bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
641 bio_set_dev(bio, bdev);
642 bio->bi_private = io;
643 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
644 bio->bi_end_io = ppl_flush_endio;
645
646 pr_debug("%s: dev: %s\n", __func__,
647 bio_devname(bio, b));
648
649 submit_bio(bio);
650 flushed_disks++;
651 }
652 }
653
654 log->disk_flush_bitmap = 0;
655
656 for (i = flushed_disks ; i < raid_disks; i++) {
657 if (atomic_dec_and_test(&io->pending_flushes))
658 ppl_io_unit_finished(io);
659 }
660}
661
662static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
663 struct ppl_log *log)
664{
665 struct ppl_io_unit *io;
666
667 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
668 log_sibling);
669
670 return !io || !io->submitted;
671}
672
673void ppl_quiesce(struct r5conf *conf, int quiesce)
674{
675 struct ppl_conf *ppl_conf = conf->log_private;
676 int i;
677
678 if (quiesce) {
679 for (i = 0; i < ppl_conf->count; i++) {
680 struct ppl_log *log = &ppl_conf->child_logs[i];
681
682 spin_lock_irq(&log->io_list_lock);
683 wait_event_lock_irq(conf->wait_for_quiescent,
684 ppl_no_io_unit_submitted(conf, log),
685 log->io_list_lock);
686 spin_unlock_irq(&log->io_list_lock);
687 }
688 }
689}
690
691int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
692{
693 if (bio->bi_iter.bi_size == 0) {
694 bio_endio(bio);
695 return 0;
696 }
697 bio->bi_opf &= ~REQ_PREFLUSH;
698 return -EAGAIN;
699}
700
701void ppl_stripe_write_finished(struct stripe_head *sh)
702{
703 struct ppl_io_unit *io;
704
705 io = sh->ppl_io;
706 sh->ppl_io = NULL;
707
708 if (io && atomic_dec_and_test(&io->pending_stripes)) {
709 if (io->log->disk_flush_bitmap)
710 ppl_do_flush(io);
711 else
712 ppl_io_unit_finished(io);
713 }
714}
715
716static void ppl_xor(int size, struct page *page1, struct page *page2)
717{
718 struct async_submit_ctl submit;
719 struct dma_async_tx_descriptor *tx;
720 struct page *xor_srcs[] = { page1, page2 };
721
722 init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
723 NULL, NULL, NULL, NULL);
724 tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);
725
726 async_tx_quiesce(&tx);
727}
728
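
/*
 * Recover the parity covered by a single PPL header entry.  For each
 * stripe-sized block in the entry, xor together the data blocks read from
 * the member disks and, unless the stripe was a full stripe write, the
 * partial parity read from the log, then write the result to the parity
 * disk of that stripe.
 */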
795static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
796 sector_t ppl_sector)
797{
798 struct ppl_conf *ppl_conf = log->ppl_conf;
799 struct mddev *mddev = ppl_conf->mddev;
800 struct r5conf *conf = mddev->private;
801 int block_size = ppl_conf->block_size;
802 struct page *page1;
803 struct page *page2;
804 sector_t r_sector_first;
805 sector_t r_sector_last;
806 int strip_sectors;
807 int data_disks;
808 int i;
809 int ret = 0;
810 char b[BDEVNAME_SIZE];
811 unsigned int pp_size = le32_to_cpu(e->pp_size);
812 unsigned int data_size = le32_to_cpu(e->data_size);
813
814 page1 = alloc_page(GFP_KERNEL);
815 page2 = alloc_page(GFP_KERNEL);
816
817 if (!page1 || !page2) {
818 ret = -ENOMEM;
819 goto out;
820 }
821
822 r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
823
824 if ((pp_size >> 9) < conf->chunk_sectors) {
825 if (pp_size > 0) {
826 data_disks = data_size / pp_size;
827 strip_sectors = pp_size >> 9;
828 } else {
829 data_disks = conf->raid_disks - conf->max_degraded;
830 strip_sectors = (data_size >> 9) / data_disks;
831 }
832 r_sector_last = r_sector_first +
833 (data_disks - 1) * conf->chunk_sectors +
834 strip_sectors;
835 } else {
836 data_disks = conf->raid_disks - conf->max_degraded;
837 strip_sectors = conf->chunk_sectors;
838 r_sector_last = r_sector_first + (data_size >> 9);
839 }
840
841 pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
842 (unsigned long long)r_sector_first,
843 (unsigned long long)r_sector_last);
844
	/* if the start and end are 4k aligned, process whole 4k blocks */
846 if (block_size == 512 &&
847 (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
848 (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
849 block_size = STRIPE_SIZE;
850
	/* iterate through blocks in the strip */
852 for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
853 bool update_parity = false;
854 sector_t parity_sector;
855 struct md_rdev *parity_rdev;
856 struct stripe_head sh;
857 int disk;
858 int indent = 0;
859
860 pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
861 indent += 2;
862
863 memset(page_address(page1), 0, PAGE_SIZE);
864
		/* xor together this block from every data member disk */
866 for (disk = 0; disk < data_disks; disk++) {
867 int dd_idx;
868 struct md_rdev *rdev;
869 sector_t sector;
870 sector_t r_sector = r_sector_first + i +
871 (disk * conf->chunk_sectors);
872
873 pr_debug("%s:%*s data member disk %d start\n",
874 __func__, indent, "", disk);
875 indent += 2;
876
877 if (r_sector >= r_sector_last) {
878 pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
879 __func__, indent, "",
880 (unsigned long long)r_sector);
881 indent -= 2;
882 continue;
883 }
884
885 update_parity = true;
886
			/* map raid sector to member disk and sector */
888 sector = raid5_compute_sector(conf, r_sector, 0,
889 &dd_idx, NULL);
890 pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
891 __func__, indent, "",
892 (unsigned long long)r_sector, dd_idx,
893 (unsigned long long)sector);
894
895 rdev = conf->disks[dd_idx].rdev;
896 if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
897 sector >= rdev->recovery_offset)) {
898 pr_debug("%s:%*s data member disk %d missing\n",
899 __func__, indent, "", dd_idx);
900 update_parity = false;
901 break;
902 }
903
904 pr_debug("%s:%*s reading data member disk %s sector %llu\n",
905 __func__, indent, "", bdevname(rdev->bdev, b),
906 (unsigned long long)sector);
907 if (!sync_page_io(rdev, sector, block_size, page2,
908 REQ_OP_READ, 0, false)) {
909 md_error(mddev, rdev);
910 pr_debug("%s:%*s read failed!\n", __func__,
911 indent, "");
912 ret = -EIO;
913 goto out;
914 }
915
916 ppl_xor(block_size, page1, page2);
917
918 indent -= 2;
919 }
920
921 if (!update_parity)
922 continue;
923
924 if (pp_size > 0) {
925 pr_debug("%s:%*s reading pp disk sector %llu\n",
926 __func__, indent, "",
927 (unsigned long long)(ppl_sector + i));
928 if (!sync_page_io(log->rdev,
929 ppl_sector - log->rdev->data_offset + i,
930 block_size, page2, REQ_OP_READ, 0,
931 false)) {
932 pr_debug("%s:%*s read failed!\n", __func__,
933 indent, "");
934 md_error(mddev, log->rdev);
935 ret = -EIO;
936 goto out;
937 }
938
939 ppl_xor(block_size, page1, page2);
940 }
941
		/* map raid sector to parity disk */
943 parity_sector = raid5_compute_sector(conf, r_sector_first + i,
944 0, &disk, &sh);
945 BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
946 parity_rdev = conf->disks[sh.pd_idx].rdev;
947
948 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
949 pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
950 __func__, indent, "",
951 (unsigned long long)parity_sector,
952 bdevname(parity_rdev->bdev, b));
953 if (!sync_page_io(parity_rdev, parity_sector, block_size,
954 page1, REQ_OP_WRITE, 0, false)) {
955 pr_debug("%s:%*s parity write error!\n", __func__,
956 indent, "");
957 md_error(mddev, parity_rdev);
958 ret = -EIO;
959 goto out;
960 }
961 }
962out:
963 if (page1)
964 __free_page(page1);
965 if (page2)
966 __free_page(page2);
967 return ret;
968}
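
/*
 * Replay all entries of a PPL header: verify each entry's partial parity
 * checksum and, if it matches, recompute and rewrite the parity that the
 * entry describes.  Entries with a bad checksum are counted as mismatches
 * and skipped.
 */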
970static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
971 sector_t offset)
972{
973 struct ppl_conf *ppl_conf = log->ppl_conf;
974 struct md_rdev *rdev = log->rdev;
975 struct mddev *mddev = rdev->mddev;
976 sector_t ppl_sector = rdev->ppl.sector + offset +
977 (PPL_HEADER_SIZE >> 9);
978 struct page *page;
979 int i;
980 int ret = 0;
981
982 page = alloc_page(GFP_KERNEL);
983 if (!page)
984 return -ENOMEM;
985
	/* iterate through all PPL entries saved in the header */
987 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
988 struct ppl_header_entry *e = &pplhdr->entries[i];
989 u32 pp_size = le32_to_cpu(e->pp_size);
990 sector_t sector = ppl_sector;
991 int ppl_entry_sectors = pp_size >> 9;
992 u32 crc, crc_stored;
993
994 pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
995 __func__, rdev->raid_disk, i,
996 (unsigned long long)ppl_sector, pp_size);
997
998 crc = ~0;
999 crc_stored = le32_to_cpu(e->checksum);
1000
		/* read the partial parity for this entry and calculate its checksum */
1002 while (pp_size) {
1003 int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
1004
1005 if (!sync_page_io(rdev, sector - rdev->data_offset,
1006 s, page, REQ_OP_READ, 0, false)) {
1007 md_error(mddev, rdev);
1008 ret = -EIO;
1009 goto out;
1010 }
1011
1012 crc = crc32c_le(crc, page_address(page), s);
1013
1014 pp_size -= s;
1015 sector += s >> 9;
1016 }
1017
1018 crc = ~crc;
1019
1020 if (crc != crc_stored) {
			/*
			 * Don't recover this entry if the partial parity
			 * checksum does not match, but keep going and try to
			 * recover the other entries.
			 */
1026 pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
1027 __func__, crc_stored, crc);
1028 ppl_conf->mismatch_count++;
1029 } else {
1030 ret = ppl_recover_entry(log, e, ppl_sector);
1031 if (ret)
1032 goto out;
1033 ppl_conf->recovered_entries++;
1034 }
1035
1036 ppl_sector += ppl_entry_sectors;
1037 }
1038
	/* flush the disk cache after recovery if necessary */
1040 ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
1041out:
1042 __free_page(page);
1043 return ret;
1044}
1045
1046static int ppl_write_empty_header(struct ppl_log *log)
1047{
1048 struct page *page;
1049 struct ppl_header *pplhdr;
1050 struct md_rdev *rdev = log->rdev;
1051 int ret = 0;
1052
1053 pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
1054 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
1055
1056 page = alloc_page(GFP_NOIO | __GFP_ZERO);
1057 if (!page)
1058 return -ENOMEM;
1059
1060 pplhdr = page_address(page);
1061
1062 blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
1063 log->rdev->ppl.size, GFP_NOIO, 0);
1064 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
1065 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1066 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
1067
1068 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
1069 PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
1070 REQ_FUA, 0, false)) {
1071 md_error(rdev->mddev, rdev);
1072 ret = -EIO;
1073 }
1074
1075 __free_page(page);
1076 return ret;
1077}
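
/*
 * Scan the PPL area of a member disk for the most recent valid header and,
 * if the array is being assembled after a dirty shutdown, recover from it.
 * When assembling, an empty header is then written to reset the log.
 */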
1079static int ppl_load_distributed(struct ppl_log *log)
1080{
1081 struct ppl_conf *ppl_conf = log->ppl_conf;
1082 struct md_rdev *rdev = log->rdev;
1083 struct mddev *mddev = rdev->mddev;
1084 struct page *page, *page2, *tmp;
1085 struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
1086 u32 crc, crc_stored;
1087 u32 signature;
1088 int ret = 0, i;
1089 sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
1090
1091 pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
1092
1093 page = alloc_page(GFP_KERNEL);
1094 if (!page)
1095 return -ENOMEM;
1096
1097 page2 = alloc_page(GFP_KERNEL);
1098 if (!page2) {
1099 __free_page(page);
1100 return -ENOMEM;
1101 }
1102
	/* search the PPL area for the latest (highest generation) header */
1104 while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
1105 if (!sync_page_io(rdev,
1106 rdev->ppl.sector - rdev->data_offset +
1107 pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
1108 0, false)) {
1109 md_error(mddev, rdev);
1110 ret = -EIO;
1111
1112 pplhdr = NULL;
1113 break;
1114 }
1115 pplhdr = page_address(page);
1116
		/* check header validity */
1118 crc_stored = le32_to_cpu(pplhdr->checksum);
1119 pplhdr->checksum = 0;
1120 crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
1121
1122 if (crc_stored != crc) {
1123 pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
1124 __func__, crc_stored, crc,
1125 (unsigned long long)pplhdr_offset);
1126 pplhdr = prev_pplhdr;
1127 pplhdr_offset = prev_pplhdr_offset;
1128 break;
1129 }
1130
1131 signature = le32_to_cpu(pplhdr->signature);
1132
1133 if (mddev->external) {
			/*
			 * For external metadata the header signature is set
			 * and validated in userspace.
			 */
1138 ppl_conf->signature = signature;
1139 } else if (ppl_conf->signature != signature) {
1140 pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
1141 __func__, signature, ppl_conf->signature,
1142 (unsigned long long)pplhdr_offset);
1143 pplhdr = prev_pplhdr;
1144 pplhdr_offset = prev_pplhdr_offset;
1145 break;
1146 }
1147
1148 if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
1149 le64_to_cpu(pplhdr->generation)) {
			/* previous header was the newest */
1151 pplhdr = prev_pplhdr;
1152 pplhdr_offset = prev_pplhdr_offset;
1153 break;
1154 }
1155
1156 prev_pplhdr_offset = pplhdr_offset;
1157 prev_pplhdr = pplhdr;
1158
1159 tmp = page;
1160 page = page2;
1161 page2 = tmp;
1162
		/* calculate the next potential ppl offset */
1164 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
1165 pplhdr_offset +=
1166 le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
1167 pplhdr_offset += PPL_HEADER_SIZE >> 9;
1168 }
1169
	/* no valid ppl was found */
1171 if (!pplhdr)
1172 ppl_conf->mismatch_count++;
1173 else
1174 pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
1175 __func__, (unsigned long long)pplhdr_offset,
1176 le64_to_cpu(pplhdr->generation));
1177
	/* attempt to recover from log if we are starting a dirty array */
1179 if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
1180 ret = ppl_recover(log, pplhdr, pplhdr_offset);
1181
	/* write empty header if we are starting the array */
1183 if (!ret && !mddev->pers)
1184 ret = ppl_write_empty_header(log);
1185
1186 __free_page(page);
1187 __free_page(page2);
1188
1189 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1190 __func__, ret, ppl_conf->mismatch_count,
1191 ppl_conf->recovered_entries);
1192 return ret;
1193}
1194
1195static int ppl_load(struct ppl_conf *ppl_conf)
1196{
1197 int ret = 0;
1198 u32 signature = 0;
1199 bool signature_set = false;
1200 int i;
1201
1202 for (i = 0; i < ppl_conf->count; i++) {
1203 struct ppl_log *log = &ppl_conf->child_logs[i];
1204
		/* skip missing drive */
1206 if (!log->rdev)
1207 continue;
1208
1209 ret = ppl_load_distributed(log);
1210 if (ret)
1211 break;
1212
		/*
		 * For external metadata we can't check if the signature is
		 * correct on a single drive, but we can check if it is the
		 * same for all drives.
		 */
1218 if (ppl_conf->mddev->external) {
1219 if (!signature_set) {
1220 signature = ppl_conf->signature;
1221 signature_set = true;
1222 } else if (signature != ppl_conf->signature) {
1223 pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
1224 mdname(ppl_conf->mddev));
1225 ret = -EINVAL;
1226 break;
1227 }
1228 }
1229 }
1230
1231 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1232 __func__, ret, ppl_conf->mismatch_count,
1233 ppl_conf->recovered_entries);
1234 return ret;
1235}
1236
1237static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1238{
1239 clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1240 clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1241
1242 kfree(ppl_conf->child_logs);
1243
1244 bioset_exit(&ppl_conf->bs);
1245 bioset_exit(&ppl_conf->flush_bs);
1246 mempool_exit(&ppl_conf->io_pool);
1247 kmem_cache_destroy(ppl_conf->io_kc);
1248
1249 kfree(ppl_conf);
1250}
1251
1252void ppl_exit_log(struct r5conf *conf)
1253{
1254 struct ppl_conf *ppl_conf = conf->log_private;
1255
1256 if (ppl_conf) {
1257 __ppl_exit_log(ppl_conf);
1258 conf->log_private = NULL;
1259 }
1260}
1261
1262static int ppl_validate_rdev(struct md_rdev *rdev)
1263{
1264 char b[BDEVNAME_SIZE];
1265 int ppl_data_sectors;
1266 int ppl_size_new;
1267
	/*
	 * The PPL area must be large enough to hold the header and at least
	 * one stripe's worth of partial parity.  Round the data space down
	 * to a multiple of the stripe size.
	 */
1274 ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
1275
1276 if (ppl_data_sectors > 0)
1277 ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);
1278
1279 if (ppl_data_sectors <= 0) {
1280 pr_warn("md/raid:%s: PPL space too small on %s\n",
1281 mdname(rdev->mddev), bdevname(rdev->bdev, b));
1282 return -ENOSPC;
1283 }
1284
1285 ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
1286
1287 if ((rdev->ppl.sector < rdev->data_offset &&
1288 rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
1289 (rdev->ppl.sector >= rdev->data_offset &&
1290 rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
1291 pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
1292 mdname(rdev->mddev), bdevname(rdev->bdev, b));
1293 return -EINVAL;
1294 }
1295
1296 if (!rdev->mddev->external &&
1297 ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
1298 (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
1299 pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
1300 mdname(rdev->mddev), bdevname(rdev->bdev, b));
1301 return -EINVAL;
1302 }
1303
1304 rdev->ppl.size = ppl_size_new;
1305
1306 return 0;
1307}
1308
1309static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1310{
1311 struct request_queue *q;
1312
1313 if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
1314 PPL_HEADER_SIZE) * 2) {
1315 log->use_multippl = true;
1316 set_bit(MD_HAS_MULTIPLE_PPLS,
1317 &log->ppl_conf->mddev->flags);
1318 log->entry_space = PPL_SPACE_SIZE;
1319 } else {
1320 log->use_multippl = false;
1321 log->entry_space = (log->rdev->ppl.size << 9) -
1322 PPL_HEADER_SIZE;
1323 }
1324 log->next_io_sector = rdev->ppl.sector;
1325
1326 q = bdev_get_queue(rdev->bdev);
1327 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
1328 log->wb_cache_on = true;
1329}
1330
1331int ppl_init_log(struct r5conf *conf)
1332{
1333 struct ppl_conf *ppl_conf;
1334 struct mddev *mddev = conf->mddev;
1335 int ret = 0;
1336 int max_disks;
1337 int i;
1338
1339 pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
1340 mdname(conf->mddev));
1341
1342 if (PAGE_SIZE != 4096)
1343 return -EINVAL;
1344
1345 if (mddev->level != 5) {
1346 pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
1347 mdname(mddev), mddev->level);
1348 return -EINVAL;
1349 }
1350
1351 if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
1352 pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
1353 mdname(mddev));
1354 return -EINVAL;
1355 }
1356
1357 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1358 pr_warn("md/raid:%s PPL is not compatible with journal\n",
1359 mdname(mddev));
1360 return -EINVAL;
1361 }
1362
1363 max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
1364 BITS_PER_BYTE;
1365 if (conf->raid_disks > max_disks) {
1366 pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
1367 mdname(mddev), max_disks);
1368 return -EINVAL;
1369 }
1370
1371 ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1372 if (!ppl_conf)
1373 return -ENOMEM;
1374
1375 ppl_conf->mddev = mddev;
1376
1377 ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1378 if (!ppl_conf->io_kc) {
1379 ret = -ENOMEM;
1380 goto err;
1381 }
1382
1383 ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
1384 ppl_io_pool_free, ppl_conf->io_kc);
1385 if (ret)
1386 goto err;
1387
1388 ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
1389 if (ret)
1390 goto err;
1391
1392 ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
1393 if (ret)
1394 goto err;
1395
1396 ppl_conf->count = conf->raid_disks;
1397 ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1398 GFP_KERNEL);
1399 if (!ppl_conf->child_logs) {
1400 ret = -ENOMEM;
1401 goto err;
1402 }
1403
1404 atomic64_set(&ppl_conf->seq, 0);
1405 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1406 spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1407 ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
1408
1409 if (!mddev->external) {
1410 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1411 ppl_conf->block_size = 512;
1412 } else {
1413 ppl_conf->block_size = queue_logical_block_size(mddev->queue);
1414 }
1415
1416 for (i = 0; i < ppl_conf->count; i++) {
1417 struct ppl_log *log = &ppl_conf->child_logs[i];
1418 struct md_rdev *rdev = conf->disks[i].rdev;
1419
1420 mutex_init(&log->io_mutex);
1421 spin_lock_init(&log->io_list_lock);
1422 INIT_LIST_HEAD(&log->io_list);
1423
1424 log->ppl_conf = ppl_conf;
1425 log->rdev = rdev;
1426
1427 if (rdev) {
1428 ret = ppl_validate_rdev(rdev);
1429 if (ret)
1430 goto err;
1431
1432 ppl_init_child_log(log, rdev);
1433 }
1434 }
1435
	/* load the PPLs from the member disks and recover if needed */
1437 ret = ppl_load(ppl_conf);
1438
1439 if (ret) {
1440 goto err;
1441 } else if (!mddev->pers && mddev->recovery_cp == 0 &&
1442 ppl_conf->recovered_entries > 0 &&
1443 ppl_conf->mismatch_count == 0) {
		/*
		 * The array was assembled dirty, but everything was recovered
		 * from the PPL with no mismatches, so the full resync is not
		 * needed - mark the array clean instead.
		 */
1448 mddev->recovery_cp = MaxSector;
1449 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
1450 } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
		/* no mismatch allowed when enabling PPL for a running array */
1452 ret = -EINVAL;
1453 goto err;
1454 }
1455
1456 conf->log_private = ppl_conf;
1457 set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1458
1459 return 0;
1460err:
1461 __ppl_exit_log(ppl_conf);
1462 return ret;
1463}
1464
1465int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1466{
1467 struct ppl_conf *ppl_conf = conf->log_private;
1468 struct ppl_log *log;
1469 int ret = 0;
1470 char b[BDEVNAME_SIZE];
1471
1472 if (!rdev)
1473 return -EINVAL;
1474
1475 pr_debug("%s: disk: %d operation: %s dev: %s\n",
1476 __func__, rdev->raid_disk, add ? "add" : "remove",
1477 bdevname(rdev->bdev, b));
1478
1479 if (rdev->raid_disk < 0)
1480 return 0;
1481
1482 if (rdev->raid_disk >= ppl_conf->count)
1483 return -ENODEV;
1484
1485 log = &ppl_conf->child_logs[rdev->raid_disk];
1486
1487 mutex_lock(&log->io_mutex);
1488 if (add) {
1489 ret = ppl_validate_rdev(rdev);
1490 if (!ret) {
1491 log->rdev = rdev;
1492 ret = ppl_write_empty_header(log);
1493 ppl_init_child_log(log, rdev);
1494 }
1495 } else {
1496 log->rdev = NULL;
1497 }
1498 mutex_unlock(&log->io_mutex);
1499
1500 return ret;
1501}
1502
1503static ssize_t
1504ppl_write_hint_show(struct mddev *mddev, char *buf)
1505{
1506 size_t ret = 0;
1507 struct r5conf *conf;
1508 struct ppl_conf *ppl_conf = NULL;
1509
1510 spin_lock(&mddev->lock);
1511 conf = mddev->private;
1512 if (conf && raid5_has_ppl(conf))
1513 ppl_conf = conf->log_private;
1514 ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
1515 spin_unlock(&mddev->lock);
1516
1517 return ret;
1518}
1519
1520static ssize_t
1521ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
1522{
1523 struct r5conf *conf;
1524 struct ppl_conf *ppl_conf;
1525 int err = 0;
1526 unsigned short new;
1527
1528 if (len >= PAGE_SIZE)
1529 return -EINVAL;
1530 if (kstrtou16(page, 10, &new))
1531 return -EINVAL;
1532
1533 err = mddev_lock(mddev);
1534 if (err)
1535 return err;
1536
1537 conf = mddev->private;
1538 if (!conf) {
1539 err = -ENODEV;
1540 } else if (raid5_has_ppl(conf)) {
1541 ppl_conf = conf->log_private;
1542 if (!ppl_conf)
1543 err = -EINVAL;
1544 else
1545 ppl_conf->write_hint = new;
1546 } else {
1547 err = -EINVAL;
1548 }
1549
1550 mddev_unlock(mddev);
1551
1552 return err ?: len;
1553}
1554
1555struct md_sysfs_entry
1556ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
1557 ppl_write_hint_show,
1558 ppl_write_hint_store);
1559