/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
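
/*
 * We use the lowest available bit in the exceptional radix tree entry for
 * locking, one bit to say whether the entry covers a PMD, and two more to
 * tell us whether the entry is a zero page or just an empty entry used for
 * locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the
 * ZERO_PAGE and EMPTY bits aren't set the entry is a normal DAX entry with
 * a filesystem block allocation.
 */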
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
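
/*
 * DAX radix tree locking: an exceptional entry is locked by setting
 * RADIX_DAX_ENTRY_LOCK; waiters queue on a hashed waitqueue keyed by
 * the mapping and the (PMD-aligned) index.
 */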
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
		int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
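
/*
 * @entry may no longer be the entry at @index in the radix tree by the time
 * we are called; all we need from it is whether it was a PMD entry, so that
 * we pick the waitqueue that tasks waiting on the old entry are queued on.
 */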
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * are in the waitqueue and the only new waiters will see it unlocked.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
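
/*
 * Check whether the given slot is locked.  Must be called with
 * mapping->tree_lock held.
 */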
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}
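
/*
 * Mark the given slot as locked.  Must be called with
 * mapping->tree_lock held.
 */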
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
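
/*
 * Mark the given slot as unlocked.  Must be called with
 * mapping->tree_lock held.
 */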
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
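
/*
 * Look up the entry at @index in the radix tree.  If it is an exceptional
 * entry that is currently locked, wait for it to become unlocked before
 * returning it.  The caller must then call put_unlocked_mapping_entry() if
 * it decides not to lock the entry, or put_locked_mapping_entry() once it
 * has locked the entry and is done with it.
 *
 * Must be called with mapping->tree_lock held.
 */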
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}
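
/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */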
static void put_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
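
/*
 * Find the radix tree entry at the given index.  If it is an exceptional
 * entry, return it with the entry locked.  If the radix tree has no entry
 * at the index, create an empty exceptional entry for the index and return
 * it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry()
 * will either return the locked PMD-sized entry or an error.  -EEXIST is
 * returned if a PTE-sized entry already exists anywhere in the 2MiB range
 * covered by the requested index.
 *
 * PTE entries are always favoured over PMD entries: a PMD insertion fails
 * if any PTE entry is already present in the range, while a PTE insertion
 * causes an existing huge zero page or empty PMD entry to be unmapped and
 * downgraded to a PTE entry.  PMD entries that map real DAX storage are
 * never downgraded, so they can still be dirtied via get_user_pages() even
 * when no PTE entries exist in the range.
 *
 * The returned entry is always locked; the caller must release it with
 * put_locked_mapping_entry().
 */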
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
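
/*
 * Delete the DAX exceptional entry at @index from @mapping.  Wait for the
 * radix tree entry to be unlocked before deleting it.
 */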
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it in the tree or in the worst case wait for the entry
	 * lock.  Failing to remove the entry here is therefore a bug.
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}
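
/*
 * Invalidate the DAX exceptional entry at @index if it is clean (i.e. not
 * dirty and not tagged for writeback).
 */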
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
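
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * entry already in the tree, we will skip the insertion and just dirty the
 * PMD as appropriate.
 */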
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_range(mapping,
					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
					PMD_SIZE, 0);
		else /* pte entry */
			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
					PAGE_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
		entry = new_entry;
	}

	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
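
/* Walk all mappings of a given index of a file and writeprotect them */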
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong if we're receiving a request for a non-DAX page.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so we cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
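
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */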
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void *entry,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	trace_dax_insert_mapping(mapping->host, vmf, ret);
	if (vmf->flags & FAULT_FLAG_WRITE)
		return vm_insert_mixed_mkwrite(vma, vaddr, pfn);
	else
		return vm_insert_mixed(vma, vaddr, pfn);
}
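
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */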
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in page cache.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}
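
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The callers needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */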
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is a
	 * reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which
	 * means that we never have to deal with more than a single extent
	 * here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * fault handlers will recover and read the page again), so
		 * tell the fs how much was actually faulted in: zero on
		 * error so that it can undo any block allocation.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	pfn_t pfn = {};
	int id;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(ret))
		goto fallback;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (colour) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * fault handlers will recover and read the page again), so
		 * tell the fs how much was actually faulted in: zero if we
		 * are falling back so that it can undo any block allocation.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
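
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */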
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);