This source file includes the following definitions:
- intel_gvt_hypervisor_host_init
- intel_gvt_hypervisor_host_exit
- intel_gvt_hypervisor_attach_vgpu
- intel_gvt_hypervisor_detach_vgpu
- intel_gvt_hypervisor_inject_msi
- intel_gvt_hypervisor_virt_to_mfn
- intel_gvt_hypervisor_enable_page_track
- intel_gvt_hypervisor_disable_page_track
- intel_gvt_hypervisor_read_gpa
- intel_gvt_hypervisor_write_gpa
- intel_gvt_hypervisor_gfn_to_mfn
- intel_gvt_hypervisor_dma_map_guest_page
- intel_gvt_hypervisor_dma_unmap_guest_page
- intel_gvt_hypervisor_map_gfn_to_mfn
- intel_gvt_hypervisor_set_trap_area
- intel_gvt_hypervisor_set_opregion
- intel_gvt_hypervisor_set_edid
- intel_gvt_hypervisor_get_vfio_device
- intel_gvt_hypervisor_put_vfio_device
- intel_gvt_hypervisor_is_valid_gfn
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33 #ifndef _GVT_MPT_H_
34 #define _GVT_MPT_H_
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52 static inline int intel_gvt_hypervisor_host_init(struct device *dev,
53 void *gvt, const void *ops)
54 {
55 if (!intel_gvt_host.mpt->host_init)
56 return -ENODEV;
57
58 return intel_gvt_host.mpt->host_init(dev, gvt, ops);
59 }
60
61
62
63
64 static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
65 {
66
67 if (!intel_gvt_host.mpt->host_exit)
68 return;
69
70 intel_gvt_host.mpt->host_exit(dev);
71 }
72
73
74
75
76
77
78
79
80 static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
81 {
82
83 if (!intel_gvt_host.mpt->attach_vgpu)
84 return 0;
85
86 return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
87 }
88
89
90
91
92
93
94
95
96 static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
97 {
98
99 if (!intel_gvt_host.mpt->detach_vgpu)
100 return;
101
102 intel_gvt_host.mpt->detach_vgpu(vgpu);
103 }
104
/*
 * Byte offsets of the MSI capability registers relative to the start of
 * the MSI capability structure in PCI config space (PCI Local Bus spec:
 * Message Control at +2, Message Address at +4, Message Data at +8 for
 * the 64-bit address layout used here).
 *
 * Macro arguments are fully parenthesized so that expression arguments
 * (e.g. "base + off" or a conditional) expand correctly.
 */
#define MSI_CAP_CONTROL(offset)	((offset) + 2)
#define MSI_CAP_ADDRESS(offset)	((offset) + 4)
#define MSI_CAP_DATA(offset)	((offset) + 8)
#define MSI_CAP_EN 0x1
109
110
111
112
113
114
115
116 static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
117 {
118 unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
119 u16 control, data;
120 u32 addr;
121 int ret;
122
123 control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
124 addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
125 data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
126
127
128 if (!(control & MSI_CAP_EN))
129 return 0;
130
131 if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
132 return -EINVAL;
133
134 trace_inject_msi(vgpu->id, addr, data);
135
136 ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
137 if (ret)
138 return ret;
139 return 0;
140 }
141
142
143
144
145
146
147
148
149 static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
150 {
151 return intel_gvt_host.mpt->from_virt_to_mfn(p);
152 }
153
154
155
156
157
158
159
160
161
162 static inline int intel_gvt_hypervisor_enable_page_track(
163 struct intel_vgpu *vgpu, unsigned long gfn)
164 {
165 return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
166 }
167
168
169
170
171
172
173
174
175
176 static inline int intel_gvt_hypervisor_disable_page_track(
177 struct intel_vgpu *vgpu, unsigned long gfn)
178 {
179 return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
180 }
181
182
183
184
185
186
187
188
189
190
191
192 static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
193 unsigned long gpa, void *buf, unsigned long len)
194 {
195 return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
196 }
197
198
199
200
201
202
203
204
205
206
207
208 static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
209 unsigned long gpa, void *buf, unsigned long len)
210 {
211 return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
212 }
213
214
215
216
217
218
219
220
221
222 static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
223 struct intel_vgpu *vgpu, unsigned long gfn)
224 {
225 return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
226 }
227
228
229
230
231
232
233
234
235
236
237
238 static inline int intel_gvt_hypervisor_dma_map_guest_page(
239 struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
240 dma_addr_t *dma_addr)
241 {
242 return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
243 dma_addr);
244 }
245
246
247
248
249
250
251 static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
252 struct intel_vgpu *vgpu, dma_addr_t dma_addr)
253 {
254 intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
255 }
256
257
258
259
260
261
262
263
264
265
266
267
268 static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
269 struct intel_vgpu *vgpu, unsigned long gfn,
270 unsigned long mfn, unsigned int nr,
271 bool map)
272 {
273
274 if (!intel_gvt_host.mpt->map_gfn_to_mfn)
275 return 0;
276
277 return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
278 map);
279 }
280
281
282
283
284
285
286
287
288
289
290
291 static inline int intel_gvt_hypervisor_set_trap_area(
292 struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
293 {
294
295 if (!intel_gvt_host.mpt->set_trap_area)
296 return 0;
297
298 return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
299 }
300
301
302
303
304
305
306
307
308 static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
309 {
310 if (!intel_gvt_host.mpt->set_opregion)
311 return 0;
312
313 return intel_gvt_host.mpt->set_opregion(vgpu);
314 }
315
316
317
318
319
320
321
322
323
324 static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
325 int port_num)
326 {
327 if (!intel_gvt_host.mpt->set_edid)
328 return 0;
329
330 return intel_gvt_host.mpt->set_edid(vgpu, port_num);
331 }
332
333
334
335
336
337
338
339
340 static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
341 {
342 if (!intel_gvt_host.mpt->get_vfio_device)
343 return 0;
344
345 return intel_gvt_host.mpt->get_vfio_device(vgpu);
346 }
347
348
349
350
351
352
353
354
355 static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
356 {
357 if (!intel_gvt_host.mpt->put_vfio_device)
358 return;
359
360 intel_gvt_host.mpt->put_vfio_device(vgpu);
361 }
362
363
364
365
366
367
368
369
370
371 static inline bool intel_gvt_hypervisor_is_valid_gfn(
372 struct intel_vgpu *vgpu, unsigned long gfn)
373 {
374 if (!intel_gvt_host.mpt->is_valid_gfn)
375 return true;
376
377 return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
378 }
379
/* Register/unregister a hypervisor backend's MPT operation table. */
int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
void intel_gvt_unregister_hypervisor(void);
382
383 #endif