1#ifndef _LINUX_DAX_H
2#define _LINUX_DAX_H
3
4#include <linux/fs.h>
5#include <linux/mm.h>
6#include <linux/radix-tree.h>
7#include <asm/pgtable.h>
8
9struct iomap_ops;
10struct dax_device;
/*
 * Operations supplied by a DAX-capable device driver to the core DAX
 * infrastructure.  Invoked via the dax_* wrappers declared below
 * (dax_direct_access(), dax_memcpy_fromiovecend(), ...).
 */
struct dax_operations {
	/*
	 * direct_access: translate a device-relative page offset into a
	 * kernel virtual address (*kaddr) and pfn for up to nr_pages
	 * pages.  NOTE(review): per the in-kernel convention this is
	 * expected to return the number of contiguous pages available at
	 * that offset, or a negative errno — confirm against the driver
	 * implementations.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/* copy @len bytes at @offset from the iovec into device memory */
	int (*memcpy_fromiovecend)(struct dax_device *, pgoff_t, void *,
			const struct iovec *, int, int);
	/* copy @len bytes at @offset from device memory into the iovec */
	int (*memcpy_toiovecend)(struct dax_device *, pgoff_t,
			const struct iovec *, void *, int, int);
};
25
26extern struct attribute_group dax_attribute_group;
27
28#if IS_ENABLED(CONFIG_DAX)
29struct dax_device *dax_get_by_host(const char *host);
30struct dax_device *alloc_dax(void *private, const char *host,
31 const struct dax_operations *ops);
32void put_dax(struct dax_device *dax_dev);
33void kill_dax(struct dax_device *dax_dev);
34void dax_write_cache(struct dax_device *dax_dev, bool wc);
35bool dax_write_cache_enabled(struct dax_device *dax_dev);
36#else
/* CONFIG_DAX=n stub: no DAX core, so no device can ever be found. */
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
/*
 * CONFIG_DAX=n stub: allocation always fails with NULL.  Callers must
 * treat a NULL return as "DAX unavailable", not as -ENOMEM.
 */
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops)
{
	return NULL;
}
/* CONFIG_DAX=n stub: nothing was allocated, nothing to release. */
static inline void put_dax(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: no live device to shut down. */
static inline void kill_dax(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: write-cache control is a no-op without a device. */
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
/* CONFIG_DAX=n stub: report the write cache as disabled. */
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
63#endif
64
65struct writeback_control;
66int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
67#if IS_ENABLED(CONFIG_FS_DAX)
68bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
/*
 * Can @bdev back a DAX filesystem with the given block size?
 * Thin wrapper so the CONFIG_FS_DAX=n build can stub this out while the
 * real check lives out of line in __bdev_dax_supported().
 */
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}
73
/*
 * Filesystem-facing alias for dax_get_by_host(); compiles to nothing
 * extra here, but lets CONFIG_FS_DAX=n builds stub it independently.
 */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}
78
/* Drop the reference taken by fs_dax_get_by_host()/fs_dax_get_by_bdev(). */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}
83
84struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
85int dax_writeback_mapping_range(struct address_space *mapping,
86 struct block_device *bdev, struct writeback_control *wbc);
87
88struct page *dax_layout_busy_page(struct address_space *mapping);
89bool dax_lock_mapping_entry(struct page *page);
90void dax_unlock_mapping_entry(struct page *page);
91#else
/* CONFIG_FS_DAX=n stub: no block device can support filesystem DAX. */
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}
97
/* CONFIG_FS_DAX=n stub: never hand a dax_device to a filesystem. */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}
102
/* CONFIG_FS_DAX=n stub: no reference was taken, nothing to drop. */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
106
/* CONFIG_FS_DAX=n stub: block devices have no associated dax_device. */
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}
111
/* CONFIG_FS_DAX=n stub: no DAX mappings, so no page can be busy. */
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}
116
/*
 * CONFIG_FS_DAX=n stub: DAX writeback is meaningless here; fail with
 * -EOPNOTSUPP so a misdirected caller gets a loud, distinct error.
 */
static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}
122
123static inline bool dax_lock_mapping_entry(struct page *page)
124{
125 if (IS_DAX(page->mapping->host))
126 return true;
127 return false;
128}
129
/* CONFIG_FS_DAX=n stub: nothing was locked, nothing to unlock. */
static inline void dax_unlock_mapping_entry(struct page *page)
{
}
133#endif
134
/*
 * Generic dax_device API, available in all configurations.
 * dax_read_lock()/dax_read_unlock() bracket sections that dereference a
 * dax_device; the returned id must be passed back to the unlock.
 */
int dax_read_lock(void);
void dax_read_unlock(int id);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
/* wrappers dispatching to the corresponding dax_operations callbacks */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
int dax_memcpy_fromiovecend(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, const struct iovec *iov, int offset, int len);
int dax_memcpy_toiovecend(struct dax_device *dax_dev, pgoff_t pgoff,
		const struct iovec *iov, void *addr, int offset, int len);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

/* iomap-based DAX read/write and fault paths used by filesystems */
ssize_t dax_iomap_rw(int rw, struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos,
		size_t count, const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t pfn);
/* invalidation of radix-tree entries backing a DAX mapping */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
158
159#ifdef CONFIG_FS_DAX
160int __dax_zero_page_range(struct block_device *bdev,
161 struct dax_device *dax_dev, sector_t sector,
162 unsigned int offset, unsigned int length);
163#else
/* CONFIG_FS_DAX=n stub: zeroing via DAX is impossible; report -ENXIO. */
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
170#endif
171
172static inline bool dax_mapping(struct address_space *mapping)
173{
174 return mapping->host && IS_DAX(mapping->host);
175}
176
177#endif
178