// SPDX-License-Identifier: GPL-2.0+

#include <linux/shmem_fs.h>

#include "vkms_drv.h"

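/*
 * __vkms_gem_create - allocate and initialise a vkms GEM object
 *
 * Rounds @size up to a multiple of PAGE_SIZE, initialises the embedded
 * drm_gem_object and the pages_lock protecting the backing pages.
 * Returns the new object or an ERR_PTR() on failure.
 */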
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
						 u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(dev, &obj->gem, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

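/*
 * vkms_gem_free_object - release a vkms GEM object
 *
 * Frees the page array (if any), destroys the pages_lock, releases the
 * embedded drm_gem_object and finally frees the wrapper structure.
 */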
void vkms_gem_free_object(struct drm_gem_object *obj)
{
	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
						   gem);

	kvfree(gem->pages);
	mutex_destroy(&gem->pages_lock);
	drm_gem_object_release(obj);
	kfree(gem);
}

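/*
 * vkms_gem_fault - page fault handler for mmap'ed vkms objects
 *
 * If the object's pages are already pinned, hand the cached page back to
 * the fault; otherwise read the page from the shmem backing store and
 * translate any error into the matching VM_FAULT_* code.
 */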
int vkms_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vkms_gem_object *obj = vma->vm_private_data;
	unsigned long vaddr = vmf->address;
	pgoff_t page_offset;
	loff_t num_pages;
	int ret;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

	if (page_offset > num_pages)
		return VM_FAULT_SIGBUS;

	ret = -ENOENT;
	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		/* Fast path: the backing pages are already pinned. */
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;
		struct address_space *mapping;

		/* Slow path: pull the page in from the shmem backing store. */
		mapping = file_inode(obj->gem.filp)->i_mapping;
		page = shmem_read_mapping_page(mapping, page_offset);

		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else {
			/* Translate the shmem error into a VM_FAULT_* code. */
			switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
			}
		}
	}
	return ret;
}

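/*
 * vkms_gem_create - create a vkms GEM object and a handle for @file
 *
 * The returned object carries the initial reference taken by
 * drm_gem_object_init(); the handle created here holds its own
 * reference, so callers such as vkms_dumb_create() drop the initial
 * one once they are done with the returned object.
 */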
static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      u32 *handle,
					      u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	if (!file || !dev || !handle)
		return ERR_PTR(-EINVAL);

	obj = __vkms_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->gem, handle);
	if (ret)
		return ERR_PTR(ret);

	return &obj->gem;
}

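/*
 * vkms_dumb_create - DRM_IOCTL_MODE_CREATE_DUMB implementation
 *
 * Computes the pitch and total size from the requested width, height
 * and bpp, creates the backing GEM object and returns its handle in
 * @args. A minimal sketch of the userspace side (error handling
 * omitted, values purely illustrative):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, map.offset);
 *
 * The MAP_DUMB step is served by vkms_dumb_map() below.
 */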
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_obj;
	u64 pitch, size;

	if (!args || !dev || !file)
		return -EINVAL;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = pitch * args->height;

	if (!size)
		return -EINVAL;

	gem_obj = vkms_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);

	args->size = gem_obj->size;
	args->pitch = pitch;

	drm_gem_object_put_unlocked(gem_obj);

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}

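/*
 * vkms_dumb_map - DRM_IOCTL_MODE_MAP_DUMB implementation
 *
 * Looks up the GEM object for @handle, creates a fake mmap offset for
 * it and returns that offset so userspace can mmap() the buffer
 * through the DRM file descriptor.
 */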
int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
		  u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
