1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#ifndef VVP_INTERNAL_H
38#define VVP_INTERNAL_H
39
40#include "../include/lustre/lustre_idl.h"
41#include "../include/cl_object.h"
42
43enum obd_notify_event;
44struct inode;
45struct lustre_md;
46struct obd_device;
47struct obd_export;
48struct page;
49
50
51
52
/*
 * IO state private to the vvp layer, embedded in the per-session
 * vvp_session (see vvp_env_io() below).
 */
struct vvp_io {
 /* Super class: slice linking this state into the generic cl_io. */
 struct cl_io_slice vui_cl;
 /* Lock-link used to attach this IO's lock to the cl_io lock list. */
 struct cl_io_lock_link vui_link;

 /*
  * I/O vector to or from which the read/write transfers data.
  * NOTE(review): presumably owned by the caller's kiocb — confirm.
  */
 struct iov_iter *vui_iter;

 /* Total remaining byte count for this IO. */
 size_t vui_tot_count;

 /* Per-IO-type state; only one union member is active at a time. */
 union {
  struct vvp_fault_io {
   /*
    * Inode modification time snapshot; assumed to be
    * compared across the DLM lock request — TODO confirm.
    */
   time64_t ft_mtime;
   /* VMA in which the page fault occurred. */
   struct vm_area_struct *ft_vma;
   /*
    * VM page produced by the fault path; presumably returned
    * locked — verify against the fault handler.
    */
   struct page *ft_vmpage;
   /* Kernel fault descriptor handed to the fault handler. */
   struct vm_fault *ft_vmf;
   /*
    * Fault result flags; only meaningful when
    * ft_flags_valid is set.
    */
   unsigned int ft_flags;
   /* True once ft_flags has been filled in. */
   bool ft_flags_valid;
  } fault;
  struct {
   /* Pages queued for a batched write commit. */
   struct cl_page_list vui_queue;
   /* Bytes successfully written so far. */
   unsigned long vui_written;
   /* Start offset within the first queued page — TODO confirm. */
   int vui_from;
   /* End offset within the last queued page — TODO confirm. */
   int vui_to;
  } write;
 } u;

 /* Layout generation this IO was initialized against. */
 __u32 vui_layout_gen;

 /* Lustre-private file data of the file this IO targets. */
 struct ll_file_data *vui_fd;
 /* Kernel iocb driving this IO, if any. */
 struct kiocb *vui_iocb;

 /* Read-ahead window: start page index and page count. */
 pgoff_t vui_ra_start;
 pgoff_t vui_ra_count;
 /* True once vui_ra_start/vui_ra_count have been initialized. */
 bool vui_ra_valid;
};
115
/* Device type for the vvp layer, registered with the lu framework. */
extern struct lu_device_type vvp_device_type;

/* lu_context keys under which per-session / per-thread vvp state is stored. */
extern struct lu_context_key vvp_session_key;
extern struct lu_context_key vvp_thread_key;

/* Slab caches backing vvp_lock and vvp_object allocations. */
extern struct kmem_cache *vvp_lock_kmem;
extern struct kmem_cache *vvp_object_kmem;
123
/*
 * Per-thread scratch objects, stored under vvp_thread_key.  The
 * vvp_env_lock()/vvp_env_thread_attr()/vvp_env_thread_io() helpers below
 * zero and hand out individual fields for reuse.
 */
struct vvp_thread_info {
 struct cl_lock vti_lock;	/* scratch lock, see vvp_env_lock() */
 struct cl_lock_descr vti_descr;	/* scratch lock descriptor */
 struct cl_io vti_io;	/* scratch IO, see vvp_env_thread_io() */
 struct cl_attr vti_attr;	/* scratch attributes, see vvp_env_thread_attr() */
};
130
131static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
132{
133 struct vvp_thread_info *vti;
134
135 vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
136 LASSERT(vti);
137
138 return vti;
139}
140
141static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
142{
143 struct cl_lock *lock = &vvp_env_info(env)->vti_lock;
144
145 memset(lock, 0, sizeof(*lock));
146 return lock;
147}
148
149static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
150{
151 struct cl_attr *attr = &vvp_env_info(env)->vti_attr;
152
153 memset(attr, 0, sizeof(*attr));
154
155 return attr;
156}
157
158static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
159{
160 struct cl_io *io = &vvp_env_info(env)->vti_io;
161
162 memset(io, 0, sizeof(*io));
163
164 return io;
165}
166
/* Per-session vvp state: the vvp_io used by the session's current IO. */
struct vvp_session {
 struct vvp_io cs_ios;
};
170
171static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
172{
173 struct vvp_session *ses;
174
175 ses = lu_context_key_get(env->le_ses, &vvp_session_key);
176 LASSERT(ses);
177
178 return ses;
179}
180
181static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
182{
183 return &vvp_env_session(env)->cs_ios;
184}
185
186
187
188
/* vvp-private object state layered on top of a cl_object. */
struct vvp_object {
 struct cl_object_header vob_header;
 struct cl_object vob_cl;
 /* Back-pointer to the VFS inode this object represents. */
 struct inode *vob_inode;

 /*
  * Count of transient pages for this object.
  * NOTE(review): who increments/decrements this is not visible here —
  * confirm against the page code before relying on its semantics.
  */
 atomic_t vob_transient_pages;

 /*
  * Count of outstanding mmaps of this file — presumably maintained by
  * the vm_open/vm_close handlers; TODO confirm.
  */
 atomic_t vob_mmap_cnt;

 /*
  * One-shot flag: set after the first warning about a discarded page
  * of this object has been emitted, so later discards do not flood the
  * log — inferred from the name; verify against the discard path.
  */
 unsigned int vob_discard_page_warned:1;
};
220
221
222
223
/* vvp layer page: cl_page slice plus the VM page it wraps. */
struct vvp_page {
 struct cl_page_slice vpg_cl;
 /*
  * vpg_defer_uptodate / vpg_ra_used: read-ahead related state bits —
  * exact semantics not visible here; confirm against the read path.
  */
 unsigned int vpg_defer_uptodate:1,
 vpg_ra_used:1;
 /* The VM page backing this slice. */
 struct page *vpg_page;
};
231
232static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
233{
234 return container_of(slice, struct vvp_page, vpg_cl);
235}
236
237static inline pgoff_t vvp_index(struct vvp_page *vvp)
238{
239 return vvp->vpg_cl.cpl_index;
240}
241
/* vvp device: cl_device slice plus the next cl_device down the stack. */
struct vvp_device {
 struct cl_device vdv_cl;
 struct cl_device *vdv_next;
};
246
/* vvp lock: just the cl_lock slice, no private state. */
struct vvp_lock {
 struct cl_lock_slice vlk_cl;
};
250
/* lu_context key value constructor/destructor shared with the ccc code. */
void *ccc_key_init(const struct lu_context *ctx,
 struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
 struct lu_context_key *key, void *data);

/* Device teardown hook invoked on unmount. */
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
257
258static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
259{
260 return &vdv->vdv_cl.cd_lu_dev;
261}
262
263static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
264{
265 return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
266}
267
268static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
269{
270 return container_of0(d, struct vvp_device, vdv_cl);
271}
272
273static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
274{
275 return container_of0(obj, struct vvp_object, vob_cl);
276}
277
278static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
279{
280 return container_of0(obj, struct vvp_object, vob_cl.co_lu);
281}
282
283static inline struct inode *vvp_object_inode(const struct cl_object *obj)
284{
285 return cl2vvp(obj)->vob_inode;
286}
287
/* Invariant checker for a vvp-backed cl_object (nonzero when it holds). */
int vvp_object_invariant(const struct cl_object *obj);
/* Look up the vvp_object associated with an inode. */
struct vvp_object *cl_inode2vvp(struct inode *inode);
290
291static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
292{
293 return cl2vvp_page(slice)->vpg_page;
294}
295
296static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
297{
298 return container_of(slice, struct vvp_lock, vlk_cl);
299}
300
/*
 * Compile-time-only invariant "check": the sizeof operands are never
 * evaluated at run time, but each argument must still be a valid
 * expression, so typos are caught even in non-debug builds.
 */
# define CLOBINVRNT(env, clob, expr) \
 ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
303
/* cl_object_operations entry points implemented by the vvp layer. */
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
 struct cl_io *io);
/* Flush pages queued in vvp_io.u.write — see struct vvp_io above. */
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
 struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 struct cl_page *page, pgoff_t index);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
 const struct lu_object_header *hdr,
 struct lu_device *dev);

/* Module-wide setup/teardown for the vvp layer. */
int vvp_global_init(void);
void vvp_global_fini(void);

/* Debugfs/procfs file ops dumping the page cache state. */
extern const struct file_operations vvp_dump_pgcache_file_ops;
319
320#endif
321