1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/acpi.h>
25#include "i915_drv.h"
26#include "gvt.h"
27
28static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
29{
30 u8 *buf;
31 int i;
32
33 if (WARN((vgpu_opregion(vgpu)->va),
34 "vgpu%d: opregion has been initialized already.\n",
35 vgpu->id))
36 return -EINVAL;
37
38 vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
39 __GFP_ZERO,
40 get_order(INTEL_GVT_OPREGION_SIZE));
41
42 if (!vgpu_opregion(vgpu)->va)
43 return -ENOMEM;
44
45 memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
46 INTEL_GVT_OPREGION_SIZE);
47
48 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
49 vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
50
51
52
53
54
55 buf = (u8 *)vgpu_opregion(vgpu)->va;
56 buf[INTEL_GVT_OPREGION_CLID] = 0x3;
57
58 return 0;
59}
60
61static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
62{
63 u64 mfn;
64 int i, ret;
65
66 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
68 + i * PAGE_SIZE);
69 if (mfn == INTEL_GVT_INVALID_ADDR) {
70 gvt_vgpu_err("fail to get MFN from VA\n");
71 return -EINVAL;
72 }
73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
74 vgpu_opregion(vgpu)->gfn[i],
75 mfn, 1, map);
76 if (ret) {
77 gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
78 ret);
79 return ret;
80 }
81 }
82 return 0;
83}
84
85
86
87
88
89
90void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
91{
92 gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
93
94 if (!vgpu_opregion(vgpu)->va)
95 return;
96
97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
98 map_vgpu_opregion(vgpu, false);
99 free_pages((unsigned long)vgpu_opregion(vgpu)->va,
100 get_order(INTEL_GVT_OPREGION_SIZE));
101
102 vgpu_opregion(vgpu)->va = NULL;
103 }
104}
105
106
107
108
109
110
111
112
113
114int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
115{
116 int ret;
117
118 gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
119
120 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
121 gvt_dbg_core("emulate opregion from kernel\n");
122
123 ret = init_vgpu_opregion(vgpu, gpa);
124 if (ret)
125 return ret;
126
127 ret = map_vgpu_opregion(vgpu, true);
128 if (ret)
129 return ret;
130 }
131
132 return 0;
133}
134
135
136
137
138
139
140void intel_gvt_clean_opregion(struct intel_gvt *gvt)
141{
142 memunmap(gvt->opregion.opregion_va);
143 gvt->opregion.opregion_va = NULL;
144}
145
146
147
148
149
150
151
152
153int intel_gvt_init_opregion(struct intel_gvt *gvt)
154{
155 gvt_dbg_core("init host opregion\n");
156
157 pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
158 &gvt->opregion.opregion_pa);
159
160 gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
161 INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
162 if (!gvt->opregion.opregion_va) {
163 gvt_err("fail to map host opregion\n");
164 return -EFAULT;
165 }
166 return 0;
167}
168
/*
 * Extract the function code from a SWSCI SCIC register value.
 *
 * Fully parenthesized plain macro: the previous statement-expression
 * form left the "scic" argument unparenthesized, so an expression
 * argument (e.g. "a | b") would have expanded with the wrong
 * precedence; it also relied on a GNU extension for no benefit.
 */
#define GVT_OPREGION_FUNC(scic) \
	(((scic) & OPREGION_SCIC_FUNC_MASK) >> OPREGION_SCIC_FUNC_SHIFT)
176
/*
 * Extract the sub-function code from a SWSCI SCIC register value.
 *
 * Fully parenthesized plain macro: the previous statement-expression
 * form left the "scic" argument unparenthesized, so an expression
 * argument would have expanded with the wrong precedence; it also
 * relied on a GNU extension for no benefit.
 */
#define GVT_OPREGION_SUBFUNC(scic) \
	(((scic) & OPREGION_SCIC_SUBFUNC_MASK) >> OPREGION_SCIC_SUBFUNC_SHIFT)
184
185static const char *opregion_func_name(u32 func)
186{
187 const char *name = NULL;
188
189 switch (func) {
190 case 0 ... 3:
191 case 5:
192 case 7 ... 15:
193 name = "Reserved";
194 break;
195
196 case 4:
197 name = "Get BIOS Data";
198 break;
199
200 case 6:
201 name = "System BIOS Callbacks";
202 break;
203
204 default:
205 name = "Unknown";
206 break;
207 }
208 return name;
209}
210
211static const char *opregion_subfunc_name(u32 subfunc)
212{
213 const char *name = NULL;
214
215 switch (subfunc) {
216 case 0:
217 name = "Supported Calls";
218 break;
219
220 case 1:
221 name = "Requested Callbacks";
222 break;
223
224 case 2 ... 3:
225 case 8 ... 9:
226 name = "Reserved";
227 break;
228
229 case 5:
230 name = "Boot Display";
231 break;
232
233 case 6:
234 name = "TV-Standard/Video-Connector";
235 break;
236
237 case 7:
238 name = "Internal Graphics";
239 break;
240
241 case 10:
242 name = "Spread Spectrum Clocks";
243 break;
244
245 case 11:
246 name = "Get AKSV";
247 break;
248
249 default:
250 name = "Unknown";
251 break;
252 }
253 return name;
254};
255
256static bool querying_capabilities(u32 scic)
257{
258 u32 func, subfunc;
259
260 func = GVT_OPREGION_FUNC(scic);
261 subfunc = GVT_OPREGION_SUBFUNC(scic);
262
263 if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
264 subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
265 || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
266 subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
267 || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
268 subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
269 return true;
270 }
271 return false;
272}
273
274
275
276
277
278
279
280
281
282int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
283{
284 u32 *scic, *parm;
285 u32 func, subfunc;
286
287 scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
288 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
289
290 if (!(swsci & SWSCI_SCI_SELECT)) {
291 gvt_vgpu_err("requesting SMI service\n");
292 return 0;
293 }
294
295 if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
296 & SWSCI_SCI_TRIGGER) ||
297 !(swsci & SWSCI_SCI_TRIGGER)) {
298 return 0;
299 }
300
301 func = GVT_OPREGION_FUNC(*scic);
302 subfunc = GVT_OPREGION_SUBFUNC(*scic);
303 if (!querying_capabilities(*scic)) {
304 gvt_vgpu_err("requesting runtime service: func \"%s\","
305 " subfunc \"%s\"\n",
306 opregion_func_name(func),
307 opregion_subfunc_name(subfunc));
308
309
310
311
312 *scic &= ~OPREGION_SCIC_EXIT_MASK;
313 return 0;
314 }
315
316 *scic = 0;
317 *parm = 0;
318 return 0;
319}
320