/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

enum {
        INTEL_GVT_PCI_BAR_GTTMMIO = 0,
        INTEL_GVT_PCI_BAR_APERTURE,
        INTEL_GVT_PCI_BAR_PIO,
        INTEL_GVT_PCI_BAR_MAX,
};
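
/*
 * Per the BAR emulation below, INTEL_GVT_PCI_BAR_GTTMMIO backs
 * PCI_BASE_ADDRESS_0/1 (the GTT + MMIO BAR) and INTEL_GVT_PCI_BAR_APERTURE
 * backs PCI_BASE_ADDRESS_2/3 (the graphics aperture BAR).
 */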

/* Per-byte bitmap of the writable bits (RW or RW1C; the two kinds never
 * share a byte) in the standard PCI configuration space header. (Not the
 * full 256 bytes.)
 */
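/*
 * Layout note: a designated initializer may be followed by plain values that
 * fill the following bytes, e.g. "[PCI_COMMAND] = 0xff, 0x07" makes the low
 * command byte fully writable and only bits 0-2 of its high byte, while
 * "[PCI_STATUS] = 0x00, 0xf9" keeps the low status byte read-only and exposes
 * the RW1C bits in its high byte.
 */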
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
        [PCI_COMMAND]           = 0xff, 0x07,
        [PCI_STATUS]            = 0x00, 0xf9, /* the only RW1C byte */
        [PCI_CACHE_LINE_SIZE]   = 0xff,
        [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
        [PCI_ROM_ADDRESS]       = 0x01, 0xf8, 0xff, 0xff,
        [PCI_INTERRUPT_LINE]    = 0xff,
};

/**
 * vgpu_pci_cfg_mem_write - write virtual cfg space memory
 * @vgpu: target vgpu
 * @off: offset
 * @src: src ptr to write
 * @bytes: number of bytes
 *
 * Use this function to write virtual cfg space memory.
 * For standard cfg space, only RW bits can be changed,
 * and we emulate the RW1C behavior of the PCI_STATUS register.
 */
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
                u8 *src, unsigned int bytes)
{
        u8 *cfg_base = vgpu_cfg_space(vgpu);
        u8 mask, new, old;
        pci_power_t pwr;
        int i = 0;

        for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
                mask = pci_cfg_space_rw_bmp[off + i];
                old = cfg_base[off + i];
                new = src[i] & mask;

                /*
                 * The high byte of PCI_STATUS contains RW1C bits: writing
                 * a 1 to such a bit clears it, writing a 0 has no effect.
                 */
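                /*
                 * Illustrative example: with mask 0xf9, old = 0x10 and a
                 * guest write of 0x10, new becomes (~0x10 & 0x10) & 0xf9 = 0,
                 * so the bit is cleared; a guest write of 0x00 leaves the
                 * stored 0x10 untouched.
                 */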
                if (off + i == PCI_STATUS + 1)
                        new = (~new & old) & mask;

                cfg_base[off + i] = (old & ~mask) | new;
        }

        /* Copy the rest of the configuration space through as is. */
        if (i < bytes)
                memcpy(cfg_base + off + i, src + i, bytes - i);

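        /*
         * If the write touches the cached PMCSR offset, decode the new power
         * state and note D3hot entry in vgpu->d3_entered.
         */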
        if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
                pwr = (pci_power_t __force)(*(u16 *)(&vgpu_cfg_space(vgpu)[off])
                        & PCI_PM_CTRL_STATE_MASK);
                if (pwr == PCI_D3hot)
                        vgpu->d3_entered = true;
                gvt_dbg_core("vgpu-%d power status changed to %d\n",
                             vgpu->id, pwr);
        }
}

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @vgpu: target vgpu
 * @offset: offset
 * @p_data: return data ptr
 * @bytes: number of bytes to read
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
{
        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

        if (drm_WARN_ON(&i915->drm, bytes > 4))
                return -EINVAL;

        if (drm_WARN_ON(&i915->drm,
                        offset + bytes > vgpu->gvt->device_info.cfg_space_size))
                return -EINVAL;

        memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
        return 0;
}

static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
        phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
        unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
        u64 first_gfn;
        u64 val;
        int ret;

        if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
                return 0;

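        /*
         * Read back the base the guest programmed into the aperture BAR:
         * a 64-bit memory BAR spans PCI_BASE_ADDRESS_2/3, otherwise only
         * the low dword holds the address.
         */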
        val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
        if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
                val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
        else
                val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

        first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;

        ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
                                                  aperture_pa >> PAGE_SHIFT,
                                                  aperture_sz >> PAGE_SHIFT,
                                                  map);
        if (ret)
                return ret;

        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
        return 0;
}

static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
        u64 start, end;
        u64 val;
        int ret;

        if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
                return 0;

        val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
        if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
                start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
        else
                start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

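        /* Mask off the low four flag bits to get the MMIO base address. */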
        start &= ~GENMASK(3, 0);
        end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

        ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
        if (ret)
                return ret;

        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
        return 0;
}

static int emulate_pci_command_write(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
{
        u8 old = vgpu_cfg_space(vgpu)[offset];
        u8 new = *(u8 *)p_data;
        u8 changed = old ^ new;
        int ret;

        vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
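
        /*
         * Only a toggle of PCI_COMMAND_MEMORY needs more work: clearing
         * memory decode untraps the GTT/MMIO BAR and unmaps the aperture,
         * setting it traps and maps them again.
         */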
        if (!(changed & PCI_COMMAND_MEMORY))
                return 0;

        if (old & PCI_COMMAND_MEMORY) {
                ret = trap_gttmmio(vgpu, false);
                if (ret)
                        return ret;
                ret = map_aperture(vgpu, false);
                if (ret)
                        return ret;
        } else {
                ret = trap_gttmmio(vgpu, true);
                if (ret)
                        return ret;
                ret = map_aperture(vgpu, true);
                if (ret)
                        return ret;
        }

        return 0;
}

static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
{
        u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
        u32 new = *(u32 *)(p_data);

        if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
                /* We don't have a ROM, report a size of 0. */
                *pval = 0;
        else
                vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
        return 0;
}

static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
{
        u32 new = *(u32 *)(p_data);
        bool lo = IS_ALIGNED(offset, 8);
        u64 size;
        int ret = 0;
        bool mmio_enabled =
                vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
        struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;

        /*
         * Power-up software can determine how much address
         * space the device requires by writing a value of
         * all 1's to the register and then reading the value
         * back. The device will return 0's in all don't-care
         * address bits.
         */
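        /*
         * Example: for a 16 MB BAR, size = 0x1000000 and the value written
         * back below is ~(size - 1) = 0xffffffffff000000, so the guest reads
         * zeros in the low 24 bits and infers the BAR size from them.
         */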
        if (new == 0xffffffff) {
                switch (offset) {
                case PCI_BASE_ADDRESS_0:
                case PCI_BASE_ADDRESS_1:
                        size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1);
                        intel_vgpu_write_pci_bar(vgpu, offset,
                                                 size >> (lo ? 0 : 32), lo);
                        /*
                         * Untrap the BAR, since the guest hasn't configured a
                         * valid GPA yet.
                         */
                        ret = trap_gttmmio(vgpu, false);
                        break;
                case PCI_BASE_ADDRESS_2:
                case PCI_BASE_ADDRESS_3:
                        size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size - 1);
                        intel_vgpu_write_pci_bar(vgpu, offset,
                                                 size >> (lo ? 0 : 32), lo);
                        ret = map_aperture(vgpu, false);
                        break;
                default:
                        /* Unimplemented BARs */
                        intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
                }
        } else {
                switch (offset) {
                case PCI_BASE_ADDRESS_0:
                case PCI_BASE_ADDRESS_1:
                        /*
                         * Untrap the old BAR first, since the guest has
                         * re-configured the BAR.
                         */
                        trap_gttmmio(vgpu, false);
                        intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
                        ret = trap_gttmmio(vgpu, mmio_enabled);
                        break;
                case PCI_BASE_ADDRESS_2:
                case PCI_BASE_ADDRESS_3:
                        map_aperture(vgpu, false);
                        intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
                        ret = map_aperture(vgpu, mmio_enabled);
                        break;
                default:
                        intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
                }
        }
        return ret;
}

/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @vgpu: target vgpu
 * @offset: offset
 * @p_data: write data ptr
 * @bytes: number of bytes to write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
{
        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        int ret;

        if (drm_WARN_ON(&i915->drm, bytes > 4))
                return -EINVAL;

        if (drm_WARN_ON(&i915->drm,
                        offset + bytes > vgpu->gvt->device_info.cfg_space_size))
                return -EINVAL;

        /* First check if it's PCI_COMMAND */
        if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
                if (drm_WARN_ON(&i915->drm, bytes > 2))
                        return -EINVAL;
                return emulate_pci_command_write(vgpu, offset, p_data, bytes);
        }

        switch (rounddown(offset, 4)) {
        case PCI_ROM_ADDRESS:
                if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);

        case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
                if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

        case INTEL_GVT_PCI_SWSCI:
                if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
                if (ret)
                        return ret;
                break;

        case INTEL_GVT_PCI_OPREGION:
                if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
                        return -EINVAL;
                ret = intel_vgpu_opregion_base_write_handler(vgpu,
                                                             *(u32 *)p_data);
                if (ret)
                        return ret;

                vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
                break;
        default:
                vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
                break;
        }
        return 0;
}

/**
 * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
 *
 * @vgpu: a vGPU
 * @primary: is the vGPU presented as primary
 *
 */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                               bool primary)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        u16 *gmch_ctl;
        u8 next;

        memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
               info->cfg_space_size);

        if (!primary) {
                vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
                        INTEL_GVT_PCI_CLASS_VGA_OTHER;
                vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
                        INTEL_GVT_PCI_CLASS_VGA_OTHER;
        }

        /* Show the guest that there isn't any stolen memory. */
        gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
        *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

        intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
                                 gvt_aperture_pa_base(gvt), true);

        vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
                                               | PCI_COMMAND_MEMORY
                                               | PCI_COMMAND_MASTER);
        /*
         * Clear the upper 32 bits of the BARs and let the guest assign
         * new values.
         */
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
        memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
                pci_resource_len(gvt->gt->i915->drm.pdev, 0);
        vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
                pci_resource_len(gvt->gt->i915->drm.pdev, 2);

        memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);

        /* PM support */
        vgpu->cfg_space.pmcsr_off = 0;
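        /*
         * Walk the standard PCI capability list to find the Power Management
         * capability and cache its PMCSR offset, so that D3 entry can be
         * spotted in vgpu_pci_cfg_mem_write().
         */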
        if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
                next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
                do {
                        if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
                                vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
                                break;
                        }
                        next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
                } while (next);
        }
}

/**
 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
 *
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
        u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
        bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
                       INTEL_GVT_PCI_CLASS_VGA_OTHER;

        if (cmd & PCI_COMMAND_MEMORY) {
                trap_gttmmio(vgpu, false);
                map_aperture(vgpu, false);
        }

        /*
         * Currently we only do such a reset while the vGPU is not owned by
         * any VM, so simply restore the entire cfg space to its default
         * values.
         */
        intel_vgpu_init_cfg_space(vgpu, primary);
}