// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *	 Author: Neo Jia <cjia@nvidia.com>
 *		 Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16
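
/*
 * Fixed layout of the vfio device file (see mdev_access() and
 * mdpy_get_region_info() below): the virtual PCI config space occupies
 * the first MDPY_CONFIG_SPACE_SIZE bytes, the framebuffer (BAR 0) starts
 * one page in at MDPY_MEMORY_BAR_OFFSET, and the same framebuffer is also
 * reported as device specific region MDPY_DISPLAY_REGION so userspace can
 * find it via VFIO_DEVICE_QUERY_GFX_PLANE.
 */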

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");

static int max_devices = 4;
module_param_named(count, max_devices, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");


#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static const struct mdpy_type {
	const char *name;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

static dev_t		mdpy_devt;
static struct class	*mdpy_class;
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static u32		mdpy_count;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] =  0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

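/*
 * Config space write handler.  Only PCI_BASE_ADDRESS_0 is emulated:
 * writing all ones is the standard PCI BAR sizing handshake, so the
 * value read back gets masked with bar_mask (the two's complement of
 * the framebuffer size) to report the BAR size; any other value is
 * stored as the new base address.
 */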
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

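/*
 * All reads and writes, for both the config space and the framebuffer,
 * funnel through this helper.  "pos" is an absolute offset into the
 * layout described above; accesses are serialized with ops_lock.
 */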
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

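/*
 * Note the framebuffer allocation is rounded up to a power of two so it
 * can be exposed directly as the (power-of-two sized) memory BAR.
 */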
static int mdpy_probe(struct mdev_device *mdev)
{
	const struct mdpy_type *type =
		&mdpy_types[mdev_get_type_group_id(mdev)];
	struct device *dev = mdev_dev(mdev);
	struct mdev_state *mdev_state;
	u32 fbsize;
	int ret;

	if (mdpy_count >= max_devices)
		return -ENOMEM;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;
	vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mdpy_dev_ops);

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (mdev_state->vconfig == NULL) {
		ret = -ENOMEM;
		goto err_state;
	}

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk) {
		ret = -ENOMEM;
		goto err_vconfig;
	}
	dev_info(dev, "%s: %s (%dx%d)\n", __func__, type->name, type->width,
		 type->height);

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type    = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	mdpy_count++;

	ret = vfio_register_group_dev(&mdev_state->vdev);
	if (ret)
		goto err_mem;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;
err_mem:
	vfree(mdev_state->memblk);
err_vconfig:
	kfree(mdev_state->vconfig);
err_state:
	vfio_uninit_group_dev(&mdev_state->vdev);
	kfree(mdev_state);
	return ret;
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
	vfio_uninit_group_dev(&mdev_state->vdev);
	kfree(mdev_state);

	mdpy_count--;
}

static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

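/*
 * Userspace maps the framebuffer by mmap()ing the vfio device fd at the
 * BAR 0 region offset.  A minimal sketch, assuming "fd" is the vfio
 * device file descriptor and "memsize" was taken from
 * VFIO_DEVICE_GET_REGION_INFO:
 *
 *	void *fb = mmap(NULL, memsize, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, MDPY_MEMORY_BAR_OFFSET);
 *
 * The mapping must be MAP_SHARED and no larger than the framebuffer.
 */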
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

static int mdpy_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

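/*
 * VFIO_DEVICE_QUERY_GFX_PLANE handler.  Only region-based planes are
 * supported: userspace probes with (PROBE | REGION) flags, then queries
 * with TYPE_REGION to learn format, size and stride, and finally maps
 * the region reported in region_index (MDPY_DISPLAY_REGION).
 */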
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format     = mdev_state->type->format;
	plane->width	      = mdev_state->type->width;
	plane->height	      = mdev_state->type->height;
	plane->stride	      = (mdev_state->type->width *
				 mdev_state->type->bytepp);
	plane->size	      = mdev_state->memsize;
	plane->region_index   = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos	      = 0;
	plane->y_pos	      = 0;
	plane->x_hot	      = 0;
	plane->y_hot	      = 0;

	return 0;
}

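/*
 * The GET_* and QUERY_GFX_PLANE ioctls all follow the usual vfio pattern:
 * copy in the fixed part of the struct ("minsz"), check that the caller's
 * argsz is at least that big, fill in the info, and copy the fixed part
 * back out.
 */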
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "%s\n", type->name);
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t description_show(struct mdev_type *mtype,
				struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}
static MDEV_TYPE_ATTR_RO(description);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", max_devices - mdpy_count);
}
static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_description.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = MDPY_TYPE_1,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = MDPY_TYPE_2,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
	.name  = MDPY_TYPE_3,
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	&mdev_type_group3,
	NULL,
};
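
/*
 * The type groups above appear in sysfs below the parent device as
 * mdev_supported_types/mdpy-vga, mdpy-xga and mdpy-hd.  An instance is
 * created by writing a uuid to the "create" attribute, for example
 * (exact parent path may differ between kernel versions):
 *
 *	echo $(uuidgen) > \
 *	  /sys/devices/virtual/mdpy/mdpy/mdev_supported_types/mdpy-vga/create
 */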

static const struct vfio_device_ops mdpy_dev_ops = {
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.mmap = mdpy_mmap,
};

static struct mdev_driver mdpy_driver = {
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove	= mdpy_remove,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner			= THIS_MODULE,
	.device_driver          = &mdpy_driver,
	.supported_type_groups	= mdev_type_groups,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto err_driver;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_class;

	ret = mdev_register_device(&mdpy_dev, &mdev_fops);
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_unregister(&mdpy_dev);
err_class:
	class_destroy(mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_device(&mdpy_dev);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
	mdpy_class = NULL;
}

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)