/*
 * fs/dax.c - Direct Access filesystem code
 *
 * DAX maps blocks of persistent memory straight into user address space,
 * bypassing the page cache.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
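
/*
 * Waiters hash (mapping, index) into this shared table rather than keeping
 * a waitqueue per radix tree entry; hash collisions only cause spurious
 * wakeups, which wake_exceptional_entry_func() filters out by key.
 */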
/*
 * We use the lowest available bit in the exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * zero page or an empty entry that is just used for locking.  In total four
 * special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
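
/*
 * Worked example of the encoding above (illustrative value): pfn 0x1234
 * stored as a locked PMD entry is
 *
 *	RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_PMD |
 *	(0x1234UL << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK
 *
 * which is exactly what dax_radix_locked_entry(0x1234, RADIX_DAX_PMD)
 * returns.
 */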

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point we can safely check if the waitqueue is active
	 * if it is, then some other entry waiter needs to be woken.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up the entry in the radix tree, and wait for it to become unlocked if
 * it is an exceptional entry.  The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xa_unlock_irq(&mapping->i_pages);
		schedule();
		finish_wait(wq, &ewait.wait);
		xa_lock_irq(&mapping->i_pages);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	xa_lock_irq(&mapping->i_pages);
	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	unlock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)
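
/*
 * DAX pages are ZONE_DEVICE pages that never sit in the page cache, so we
 * maintain page->mapping by hand.  The association is consumed by busy-page
 * detection (dax_busy_page()) and by memory error handling.
 */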

static void dax_associate_entry(void *entry, struct address_space *mapping)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * Find the radix tree entry at the given index.  If it is an exceptional
 * entry, return it with the entry locked.  If the radix tree doesn't contain
 * the given index, create an empty exceptional entry for the index and
 * return it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to a 4k entry.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
							   entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * the i_pages lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		xa_unlock_irq(&mapping->i_pages);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		xa_lock_irq(&mapping->i_pages);

		if (!entry) {
			/*
			 * We needed to drop the i_pages lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->i_pages, index,
						    NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				xa_unlock_irq(&mapping->i_pages);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->i_pages, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
						      true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->i_pages, index,
					  dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			xa_unlock_irq(&mapping->i_pages);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		xa_unlock_irq(&mapping->i_pages);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	xa_unlock_irq(&mapping->i_pages);
	return entry;
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct page *page = NULL;
	struct pagevec pvec;
	pgoff_t index, end;
	unsigned i;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	pagevec_init(&pvec);
	index = 0;
	end = -1;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the pagevec_lookup and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock().  New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *pvec_ent = pvec.pages[i];
			void *entry;

			index = indices[i];
			if (index >= end)
				break;

			if (!radix_tree_exceptional_entry(pvec_ent))
				continue;

			xa_lock_irq(&mapping->i_pages);
			entry = get_unlocked_mapping_entry(mapping, index, NULL);
			if (entry)
				page = dax_busy_page(entry);
			put_unlocked_mapping_entry(mapping, index, entry);
			xa_unlock_irq(&mapping->i_pages);
			if (page)
				break;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;

		if (page)
			break;
	}
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

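/*
 * Common helper for truncate (@trunc == true: always remove the entry) and
 * invalidation (@trunc == false: keep entries that are dirty or queued for
 * writeback).  Returns 1 if the entry was removed, 0 otherwise.
 */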
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *pages = &mapping->i_pages;

	xa_lock_irq(pages);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(pages, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to become unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it in that radix tree.
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate the exceptional DAX entry at @index if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

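/*
 * Copy one page's worth of data from the DAX device into @to for a
 * copy-on-write fault: look up the kernel mapping of @sector with
 * dax_direct_access() and do a user-page copy to the destination.
 */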
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * entry already in the tree, that PMD entry covers a zero page or is empty,
 * and we leave it in place and simply dirty it if necessary.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					    PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	xa_lock_irq(pages);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(pages, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(pages, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

	xa_unlock_irq(pages);
	return entry;
}

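/*
 * Return the user virtual address in @vma that maps file offset @pgoff;
 * the VMA must actually cover that offset (checked by VM_BUG_ON_VMA).
 */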
static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

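/*
 * Flush a single dirty DAX entry to the persistent domain: write-protect
 * every mapping of its pfn range via dax_mapping_entry_mkclean(), flush
 * the CPU caches with dax_flush(), then clear the radix tree dirty tag.
 */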
static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong if we try to writeback a non-exceptional entry.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	xa_lock_irq(pages);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
	xa_unlock_irq(pages);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xa_lock_irq(pages);
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(pages);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	xa_unlock_irq(pages);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

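/* Translate a position within the iomap extent to a 512-byte device sector */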
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	void *kaddr;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   &kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	pfn_t pfn;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	pfn = page_to_pfn_t(zero_page);
	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
			false);
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

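/*
 * Zeroing a sub-page range: if the range is aligned to the device's logical
 * block size we can ask the block layer to zero it (possibly offloading to
 * hardware write-zeroes support); otherwise we must map the page through
 * dax_direct_access() and memset + flush by hand.
 */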
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

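/*
 * The read/write "actor" that dax_iomap_rw() passes to iomap_apply(): for
 * each mapped extent, copy directly between the user iov_iter and the
 * device's kernel mapping, a page-aligned chunk at a time.
 */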
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in page cache.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

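/*
 * Handle a PTE-sized fault on a DAX file.  On success the radix tree entry
 * is updated and, unless this is a synchronous (MAP_SYNC) write fault, the
 * PTE is installed too; synchronous faults return VM_FAULT_NEEDDSYNC and
 * leave PTE insertion to dax_finish_sync_fault().
 */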
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
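/*
 * Read fault on a hole with a PMD-sized mapping: instead of allocating
 * storage, map the shared huge zero page read-only.  A later write will
 * fault again and downgrade or allocate as needed.
 */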
static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

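/* Handle a PMD-sized (huge page) fault on a DAX file */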
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
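
/*
 * Typical use, as a sketch only (the names below are illustrative, not part
 * of this file): a filesystem wires this helper into its vm_operations:
 *
 *	static vm_fault_t foo_dax_huge_fault(struct vm_fault *vmf,
 *					     enum page_entry_size pe_size)
 *	{
 *		return dax_iomap_fault(vmf, pe_size, NULL, NULL,
 *				       &foo_iomap_ops);
 *	}
 *
 * where foo_iomap_ops supplies the ->iomap_begin()/->iomap_end() callbacks
 * for the filesystem's block mapping.
 */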

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	vm_fault_t ret;

	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		xa_unlock_irq(&mapping->i_pages);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	switch (pe_size) {
	case PE_SIZE_PTE:
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
		break;
#endif
	default:
		ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the medium and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);