/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

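/* Virtual PCI BARs tracked per vGPU: BAR0 (GTT/MMIO), BAR2 (aperture), PIO. */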
enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,
	INTEL_GVT_PCI_BAR_APERTURE,
	INTEL_GVT_PCI_BAR_PIO,
	INTEL_GVT_PCI_BAR_MAX,
};

/* Bitmap of the writable bits (RW or RW1C bits; the two cannot co-exist in
 * one byte), byte by byte, for the standard PCI configuration space (not
 * the full 256 bytes).
 */
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
	[PCI_COMMAND]		= 0xff, 0x07,
	[PCI_STATUS]		= 0x00, 0xf9, /* the only RW1C byte */
	[PCI_CACHE_LINE_SIZE]	= 0xff,
	[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
	[PCI_ROM_ADDRESS]	= 0x01, 0xf8, 0xff, 0xff,
	[PCI_INTERRUPT_LINE]	= 0xff,
};

/**
 * vgpu_pci_cfg_mem_write - write virtual cfg space memory
 * @vgpu: target vgpu
 * @off: offset
 * @src: src ptr to write
 * @bytes: number of bytes
 *
 * Use this function to write virtual cfg space memory.
 * For standard cfg space, only RW bits can be changed,
 * and we emulate the RW1C behavior of the PCI_STATUS register.
 */
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
				   u8 *src, unsigned int bytes)
{
	u8 *cfg_base = vgpu_cfg_space(vgpu);
	u8 mask, new, old;
	int i = 0;

	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
		mask = pci_cfg_space_rw_bmp[off + i];
		old = cfg_base[off + i];
		new = src[i] & mask;

		/**
		 * The high byte of PCI_STATUS contains RW1C bits; emulate
		 * clearing them when the guest writes 1 to those bits.
		 * Writing 0 to an RW1C bit has no effect.
		 */
		if (off + i == PCI_STATUS + 1)
			new = (~new & old) & mask;

		cfg_base[off + i] = (old & ~mask) | new;
	}

	/* For the rest of configuration space, copy the data as-is. */
	if (i < bytes)
		memcpy(cfg_base + off + i, src + i, bytes - i);
}

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @vgpu: target vgpu
 * @offset: offset
 * @p_data: return data ptr
 * @bytes: number of bytes to read
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
		return -EINVAL;

	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
	return 0;
}

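/*
 * map_aperture - map or unmap the vGPU aperture into the guest
 *
 * Reads the guest-programmed aperture BAR (BAR2) base from the virtual cfg
 * space (handling 64-bit BARs), then asks the hypervisor to map (or unmap)
 * the corresponding guest page frames to the host aperture pages assigned
 * to this vGPU. The tracked flag avoids redundant map/unmap requests.
 */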
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
	unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
	u64 first_gfn;
	u64 val;
	int ret;

	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  aperture_pa >> PAGE_SHIFT,
						  aperture_sz >> PAGE_SHIFT,
						  map);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}

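/*
 * trap_gttmmio - enable or disable trapping of the GTT/MMIO BAR
 *
 * Computes the guest physical range covered by BAR0 (the GTT/MMIO BAR) and
 * asks the hypervisor to set up (or tear down) a trap on that range, so
 * that guest accesses to it are forwarded to GVT-g for emulation. The
 * tracked flag avoids redundant trap/untrap requests.
 */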
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
}

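/*
 * emulate_pci_command_write - emulate a write to the PCI_COMMAND register
 *
 * Applies the write to the virtual cfg space, then reacts to a change of
 * PCI_COMMAND_MEMORY: when memory decoding is turned off, the GTT/MMIO trap
 * and the aperture mapping are torn down; when it is turned on, both are
 * re-established.
 */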
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u8 old = vgpu_cfg_space(vgpu)[offset];
	u8 new = *(u8 *)p_data;
	u8 changed = old ^ new;
	int ret;

	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
	if (!(changed & PCI_COMMAND_MEMORY))
		return 0;

	if (old & PCI_COMMAND_MEMORY) {
		ret = trap_gttmmio(vgpu, false);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, false);
		if (ret)
			return ret;
	} else {
		ret = trap_gttmmio(vgpu, true);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, true);
		if (ret)
			return ret;
	}

	return 0;
}

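/*
 * emulate_pci_rom_bar_write - emulate a write to the expansion ROM BAR
 *
 * The vGPU exposes no option ROM, so a sizing write of all 1s to the
 * address bits reads back as 0 (size 0); any other write is stored in the
 * virtual cfg space as usual.
 */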
static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
	u32 new = *(u32 *)(p_data);

	if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
		/* We don't have a ROM, return size of 0. */
		*pval = 0;
	else
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
	return 0;
}

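/*
 * emulate_pci_bar_write - emulate a write to a memory BAR
 *
 * Handles the BAR sizing protocol (a write of all 1s returns the size mask)
 * as well as normal base-address updates for the GTT/MMIO and aperture BARs,
 * keeping the hypervisor trap and aperture mapping state in sync with the
 * newly programmed address and the current PCI_COMMAND_MEMORY state.
 */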
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	u32 new = *(u32 *)(p_data);
	bool lo = IS_ALIGNED(offset, 8);
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
	struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;

	/*
	 * Power-up software can determine how much address
	 * space the device requires by writing a value of
	 * all 1's to the register and then reading the value
	 * back. The device will return 0's in all don't-care
	 * address bits.
	 */
	if (new == 0xffffffff) {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						size >> (lo ? 0 : 32), lo);
			/*
			 * Untrap the BAR, since the guest hasn't configured
			 * a valid GPA yet.
			 */
			ret = trap_gttmmio(vgpu, false);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size - 1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						size >> (lo ? 0 : 32), lo);
			ret = map_aperture(vgpu, false);
			break;
		default:
			/* Unimplemented BARs */
			intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
		}
	} else {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			/*
			 * Untrap the old BAR first, since the guest has
			 * reconfigured the BAR.
			 */
			trap_gttmmio(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = trap_gttmmio(vgpu, mmio_enabled);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			map_aperture(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = map_aperture(vgpu, mmio_enabled);
			break;
		default:
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		}
	}
	return ret;
}

/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @vgpu: target vgpu
 * @offset: offset
 * @p_data: write data ptr
 * @bytes: number of bytes to write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	int ret;

	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
		return -EINVAL;

	/* First check if it's PCI_COMMAND */
	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
		if (WARN_ON(bytes > 2))
			return -EINVAL;
		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
	}

	switch (rounddown(offset, 4)) {
	case PCI_ROM_ADDRESS:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);

	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

	case INTEL_GVT_PCI_SWSCI:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;
		break;

	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_opregion_base_write_handler(vgpu,
						   *(u32 *)p_data);
		if (ret)
			return ret;

		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	default:
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	}
	return 0;
}

/**
 * intel_vgpu_init_cfg_space - init vGPU configuration space when creating
 * a vGPU
 *
 * @vgpu: a vGPU
 * @primary: is the vGPU presented as primary
 *
 */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show the guest that there isn't any stolen memory. */
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

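	/*
	 * Initialize the low dword of the aperture BAR with the host
	 * aperture base; the guest may re-program it later.
	 */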
	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the BAR upper 32 bits and let the guest assign the
	 * new values.
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
				pci_resource_len(gvt->dev_priv->drm.pdev, 0);
	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
				pci_resource_len(gvt->dev_priv->drm.pdev, 2);

	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}

/**
 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
 *
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
	u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
	bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
				INTEL_GVT_PCI_CLASS_VGA_OTHER;

	if (cmd & PCI_COMMAND_MEMORY) {
		trap_gttmmio(vgpu, false);
		map_aperture(vgpu, false);
	}

	/**
	 * Currently we only do such a reset when the vGPU is not
	 * owned by any VM, so we simply restore the entire cfg
	 * space to its default values.
	 */
	intel_vgpu_init_cfg_space(vgpu, primary);
}