/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
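
/* TTM destroy callback: evict the BO's surface, warn about leftover
 * kernel mappings, unlink the BO from the device's GEM list and free
 * the qxl_bo wrapper. */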
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

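/* Report whether a TTM BO belongs to this driver, identified by its
 * destroy callback. */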
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

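/* Build the TTM placement list for @domain.  Pinned BOs get NO_EVICT,
 * BOs no larger than one page are placed top-down, and an unknown
 * domain falls back to system memory. */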
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (pinned)
		pflag |= TTM_PL_FLAG_NO_EVICT;
	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
	}
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

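/* GEM object vtable: the PRIME hooks are driver-specific, while mmap
 * and print_info use the generic GEM/TTM helpers. */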
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

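/* Allocate a qxl_bo, initialize its GEM base object and validate it
 * into the requested domain via TTM.  On success the new BO is
 * returned in *bo_ptr with one reference held by the caller. */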
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	bo->tbo.priority = priority;
	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

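/* Map the whole BO into the kernel address space.  Mappings are
 * reference-counted, so nested kmap/kunmap pairs are cheap. */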
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		bo->map_count++;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	bo->map_count = 1;
	return 0;
}

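/* Map a single page of the BO.  VRAM and surface pages go through the
 * device's atomic io-mapping; anything else falls back to a full
 * (non-atomic) qxl_bo_kmap(). */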
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.mem.start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

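/* Drop one kernel-mapping reference; the BO stays mapped until the
 * last user has called kunmap. */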
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

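/* Undo qxl_bo_kmap_atomic_page(): release the atomic io-mapping, or
 * drop the kmap reference taken by the fallback path. */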
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

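/* Drop the caller's reference and clear its pointer; the BO is
 * destroyed once the last reference goes away. */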
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

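/* Take an additional reference on the BO. */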
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

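/* Pin the BO into its domain (bo->type) so TTM cannot evict it.  The
 * caller must already hold the BO reservation. */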
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		bo->pin_count = 1;
	else
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

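/* Drop one pin reference; once the count hits zero, clear NO_EVICT
 * from all placements and revalidate so TTM may move the BO again.
 * The caller must already hold the BO reservation. */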
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO before pinning it.  If the BO is already reserved,
 * call the internal helper __qxl_bo_pin() directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning it.  If the BO is already reserved,
 * call the internal helper __qxl_bo_unpin() directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

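/* Called at device teardown to release GEM objects that userspace
 * leaked: each leftover BO is reported, unlinked and dropped. */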
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

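/* Lazily assign a surface id: a surface BO gets its id and the
 * matching hardware surface allocated on first use. */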
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

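/* Evict every BO from surface RAM (TTM_PL_PRIV). */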
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

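/* Evict every BO from VRAM. */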
int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}