1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef _GVT_MPT_H_
34#define _GVT_MPT_H_
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52static inline int intel_gvt_hypervisor_host_init(struct device *dev,
53 void *gvt, const void *ops)
54{
55
56 if (!intel_gvt_host.mpt->host_init)
57 return 0;
58
59 return intel_gvt_host.mpt->host_init(dev, gvt, ops);
60}
61
62
63
64
65static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
66 void *gvt)
67{
68
69 if (!intel_gvt_host.mpt->host_exit)
70 return;
71
72 intel_gvt_host.mpt->host_exit(dev, gvt);
73}
74
75
76
77
78
79
80
81
82static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
83{
84
85 if (!intel_gvt_host.mpt->attach_vgpu)
86 return 0;
87
88 return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
89}
90
91
92
93
94
95
96
97
98static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
99{
100
101 if (!intel_gvt_host.mpt->detach_vgpu)
102 return;
103
104 intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
105}
106
/*
 * Register offsets within the PCI MSI capability structure, relative to
 * the capability base @offset in config space. The layout matches the
 * 32-bit (non-64-bit-address) MSI format, which is the only one this
 * code supports (see intel_gvt_hypervisor_inject_msi()).
 *
 * Arguments are fully parenthesized so the macros expand correctly when
 * given compound expressions.
 */
#define MSI_CAP_CONTROL(offset)	((offset) + 2)	/* Message Control */
#define MSI_CAP_ADDRESS(offset)	((offset) + 4)	/* Message Address */
#define MSI_CAP_DATA(offset)	((offset) + 8)	/* Message Data */
#define MSI_CAP_EN 0x1				/* MSI Enable bit in Message Control */
111
112
113
114
115
116
117
118static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
119{
120 unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
121 u16 control, data;
122 u32 addr;
123 int ret;
124
125 control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
126 addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
127 data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
128
129
130 if (!(control & MSI_CAP_EN))
131 return 0;
132
133 if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
134 return -EINVAL;
135
136 trace_inject_msi(vgpu->id, addr, data);
137
138 ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
139 if (ret)
140 return ret;
141 return 0;
142}
143
144
145
146
147
148
149
150
151static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
152{
153 return intel_gvt_host.mpt->from_virt_to_mfn(p);
154}
155
156
157
158
159
160
161
162
163
164static inline int intel_gvt_hypervisor_enable_page_track(
165 struct intel_vgpu *vgpu, unsigned long gfn)
166{
167 return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
168}
169
170
171
172
173
174
175
176
177
178static inline int intel_gvt_hypervisor_disable_page_track(
179 struct intel_vgpu *vgpu, unsigned long gfn)
180{
181 return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
182}
183
184
185
186
187
188
189
190
191
192
193
194static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
195 unsigned long gpa, void *buf, unsigned long len)
196{
197 return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
198}
199
200
201
202
203
204
205
206
207
208
209
210static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
211 unsigned long gpa, void *buf, unsigned long len)
212{
213 return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
214}
215
216
217
218
219
220
221
222
223
224static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
225 struct intel_vgpu *vgpu, unsigned long gfn)
226{
227 return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
228}
229
230
231
232
233
234
235
236
237
238
239
240static inline int intel_gvt_hypervisor_dma_map_guest_page(
241 struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
242 dma_addr_t *dma_addr)
243{
244 return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
245 dma_addr);
246}
247
248
249
250
251
252
253static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
254 struct intel_vgpu *vgpu, dma_addr_t dma_addr)
255{
256 intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
257}
258
259
260
261
262
263
264
265
266
267
268
269
270static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
271 struct intel_vgpu *vgpu, unsigned long gfn,
272 unsigned long mfn, unsigned int nr,
273 bool map)
274{
275
276 if (!intel_gvt_host.mpt->map_gfn_to_mfn)
277 return 0;
278
279 return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
280 map);
281}
282
283
284
285
286
287
288
289
290
291
292
293static inline int intel_gvt_hypervisor_set_trap_area(
294 struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
295{
296
297 if (!intel_gvt_host.mpt->set_trap_area)
298 return 0;
299
300 return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
301}
302
303
304
305
306
307
308
309
310static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
311{
312 if (!intel_gvt_host.mpt->set_opregion)
313 return 0;
314
315 return intel_gvt_host.mpt->set_opregion(vgpu);
316}
317
318
319
320
321
322
323
324
325static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
326{
327 if (!intel_gvt_host.mpt->get_vfio_device)
328 return 0;
329
330 return intel_gvt_host.mpt->get_vfio_device(vgpu);
331}
332
333
334
335
336
337
338
339
340static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
341{
342 if (!intel_gvt_host.mpt->put_vfio_device)
343 return;
344
345 intel_gvt_host.mpt->put_vfio_device(vgpu);
346}
347
348
349
350
351
352
353
354
355
356static inline bool intel_gvt_hypervisor_is_valid_gfn(
357 struct intel_vgpu *vgpu, unsigned long gfn)
358{
359 if (!intel_gvt_host.mpt->is_valid_gfn)
360 return true;
361
362 return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
363}
364
365#endif
366