#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

/* Flag for devices that support synchronous page faults (MAP_SYNC) */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct iomap_ops;
struct iomap;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative page offset into a
	 * kernel virtual address and pfn. Returns the number of
	 * contiguous pages available for DAX at that offset, or a
	 * negative errno.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * dax_supported: validate whether the given block device range
	 * is usable as an fsdax backing device at this blocksize.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* zero_page_range: required operation to zero a range of pages */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
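/*
 * Illustrative sketch (not part of this header): a driver that exposes DAX
 * access would typically fill in dax_operations and register a device with
 * alloc_dax(). The foo_* names are hypothetical; only generic_fsdax_supported,
 * alloc_dax() and DAXDEV_F_SYNC come from this header.
 *
 *	static const struct dax_operations foo_dax_ops = {
 *		.direct_access		= foo_dax_direct_access,
 *		.dax_supported		= generic_fsdax_supported,
 *		.copy_from_iter		= foo_dax_copy_from_iter,
 *		.copy_to_iter		= foo_dax_copy_to_iter,
 *		.zero_page_range	= foo_dax_zero_page_range,
 *	};
 *
 *	dax_dev = alloc_dax(foo, name, &foo_dax_ops,
 *			    foo_is_synchronous(foo) ? DAXDEV_F_SYNC : 0);
 *	if (!dax_dev)
 *		return -ENOMEM;
 */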

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);

/*
 * Check if the given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					     struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know whether
	 * this NULL return is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool dax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t len)
{
	return false;
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					     struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif
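/*
 * Illustrative sketch (not part of this header): a filesystem's ->mmap
 * handler can use daxdev_mapping_supported() to reject MAP_SYNC mappings
 * when the backing device does not support synchronous faults. The foo_*
 * names and the way the dax_device is looked up are hypothetical.
 *
 *	static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dax_device *dax_dev = foo_dax_dev(file_inode(file));
 *
 *		if (!daxdev_mapping_supported(vma, dax_dev))
 *			return -EOPNOTSUPP;
 *
 *		file_accessed(file);
 *		vma->vm_ops = &foo_file_vm_ops;
 *		return 0;
 *	}
 */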

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}

static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
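/*
 * Illustrative sketch (not part of this header): dax_direct_access() is only
 * valid while the dax device is alive, so callers bracket it with
 * dax_read_lock()/dax_read_unlock(). dax_dev, pgoff and nr_pages are assumed
 * to be supplied by the caller.
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail;
 *	int id;
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *	dax_read_unlock(id);
 *	if (avail < 0)
 *		return avail;	(negative errno on failure)
 */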

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif

#endif