This source file includes following definitions.
- i915_detect_vgpu
- intel_vgpu_has_full_ppgtt
- vgt_deballoon_space
- intel_vgt_deballoon
- vgt_balloon_space
- intel_vgt_balloon
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #include "i915_vgpu.h"
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60 void i915_detect_vgpu(struct drm_i915_private *dev_priv)
61 {
62 struct pci_dev *pdev = dev_priv->drm.pdev;
63 u64 magic;
64 u16 version_major;
65 void __iomem *shared_area;
66
67 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
68
69
70
71
72
73
74
75 if (INTEL_GEN(dev_priv) < 6)
76 return;
77
78 shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
79 if (!shared_area) {
80 DRM_ERROR("failed to map MMIO bar to check for VGT\n");
81 return;
82 }
83
84 magic = readq(shared_area + vgtif_offset(magic));
85 if (magic != VGT_MAGIC)
86 goto out;
87
88 version_major = readw(shared_area + vgtif_offset(version_major));
89 if (version_major < VGT_VERSION_MAJOR) {
90 DRM_INFO("VGT interface version mismatch!\n");
91 goto out;
92 }
93
94 dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));
95
96 dev_priv->vgpu.active = true;
97 mutex_init(&dev_priv->vgpu.lock);
98 DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
99
100 out:
101 pci_iounmap(pdev, shared_area);
102 }
103
104 bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
105 {
106 return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
107 }
108
/* Bookkeeping for the GGTT ranges this VM has ballooned out. */
struct _balloon_info_ {
	/*
	 * There are up to 2 ballooned regions in each of the mappable and
	 * unmappable halves of graphic memory: index 0/1 is for mappable
	 * graphic memory, 2/3 for unmappable graphic memory.
	 */
	struct drm_mm_node space[4];
};

static struct _balloon_info_ bl_info;
119
120 static void vgt_deballoon_space(struct i915_ggtt *ggtt,
121 struct drm_mm_node *node)
122 {
123 if (!drm_mm_node_allocated(node))
124 return;
125
126 DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
127 node->start,
128 node->start + node->size,
129 node->size / 1024);
130
131 ggtt->vm.reserved -= node->size;
132 drm_mm_remove_node(node);
133 }
134
135
136
137
138
139
140
141
142 void intel_vgt_deballoon(struct i915_ggtt *ggtt)
143 {
144 int i;
145
146 if (!intel_vgpu_active(ggtt->vm.i915))
147 return;
148
149 DRM_DEBUG("VGT deballoon.\n");
150
151 for (i = 0; i < 4; i++)
152 vgt_deballoon_space(ggtt, &bl_info.space[i]);
153 }
154
155 static int vgt_balloon_space(struct i915_ggtt *ggtt,
156 struct drm_mm_node *node,
157 unsigned long start, unsigned long end)
158 {
159 unsigned long size = end - start;
160 int ret;
161
162 if (start >= end)
163 return -EINVAL;
164
165 DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
166 start, end, size / 1024);
167 ret = i915_gem_gtt_reserve(&ggtt->vm, node,
168 size, start, I915_COLOR_UNEVICTABLE,
169 0);
170 if (!ret)
171 ggtt->vm.reserved += size;
172
173 return ret;
174 }
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
/**
 * intel_vgt_balloon - balloon out reserved graphics address trunks
 * @ggtt: the global GGTT inside which to reserve
 *
 * Under GVT-g each VM owns a slice of the mappable and unmappable graphic
 * memory. Read this VM's slice boundaries from the PVINFO registers and
 * reserve ("balloon out") every range that does NOT belong to this VM, so
 * the driver never allocates from another VM's space. Up to four holes
 * are reserved:
 *
 *   space[0]: [0, mappable_base)                     - mappable, below ours
 *   space[1]: [mappable_end, ggtt->mappable_end)     - mappable, above ours
 *   space[2]: [ggtt->mappable_end, unmappable_base)  - unmappable, below ours
 *   space[3]: [unmappable_end, ggtt->vm.total)       - unmappable, above ours
 *
 * Returns 0 on success (including the non-vGPU no-op case) or a negative
 * error code; on failure every range reserved so far is released again.
 */
int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
	unsigned long ggtt_end = ggtt->vm.total;

	unsigned long mappable_base, mappable_size, mappable_end;
	unsigned long unmappable_base, unmappable_size, unmappable_end;
	int ret;

	if (!intel_vgpu_active(ggtt->vm.i915))
		return 0;

	/* This VM's slice boundaries, as configured by the hypervisor. */
	mappable_base =
		intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base));
	mappable_size =
		intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size));
	unmappable_base =
		intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base));
	unmappable_size =
		intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size));

	mappable_end = mappable_base + mappable_size;
	unmappable_end = unmappable_base + unmappable_size;

	DRM_INFO("VGT ballooning configuration:\n");
	DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
		 mappable_base, mappable_size / 1024);
	DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
		 unmappable_base, unmappable_size / 1024);

	/* Sanity check: our slices must fit the mappable/unmappable split. */
	if (mappable_end > ggtt->mappable_end ||
	    unmappable_base < ggtt->mappable_end ||
	    unmappable_end > ggtt_end) {
		DRM_ERROR("Invalid ballooning configuration!\n");
		return -EINVAL;
	}

	/* Unmappable graphic memory ballooning */
	if (unmappable_base > ggtt->mappable_end) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[2],
					ggtt->mappable_end, unmappable_base);

		if (ret)
			goto err;
	}

	if (unmappable_end < ggtt_end) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[3],
					unmappable_end, ggtt_end);
		if (ret)
			goto err_upon_mappable;
	}

	/* Mappable graphic memory ballooning */
	if (mappable_base) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[0],
					0, mappable_base);

		if (ret)
			goto err_upon_unmappable;
	}

	if (mappable_end < ggtt->mappable_end) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[1],
					mappable_end, ggtt->mappable_end);

		if (ret)
			goto err_below_mappable;
	}

	DRM_INFO("VGT balloon successfully\n");
	return 0;

	/*
	 * Unwind in reverse reservation order: each label releases the
	 * region reserved just before the failing step.
	 */
err_below_mappable:
	vgt_deballoon_space(ggtt, &bl_info.space[0]);
err_upon_unmappable:
	vgt_deballoon_space(ggtt, &bl_info.space[3]);
err_upon_mappable:
	vgt_deballoon_space(ggtt, &bl_info.space[2]);
err:
	DRM_ERROR("VGT balloon fail\n");
	return ret;
}