/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}


/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, 0);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}
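
/*
 * Illustrative sketch (not part of the driver): vmw_resource_reference()
 * and vmw_resource_unreference() form a get/put pair around the embedded
 * kref. A caller handing the resource to another context would typically
 * do something like the following; "my_res" is a hypothetical name.
 *
 *      struct vmw_resource *my_res = vmw_resource_reference(res);
 *      ...use my_res...
 *      vmw_resource_unreference(&my_res);  // my_res is set to NULL
 *
 * Note that the final put takes dev_priv->resource_lock, since
 * vmw_resource_release() expects to run with it held and drops and
 * retakes it internally.
 */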


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - make a resource visible to lookups
 *
 * @res: Pointer to the newly created resource.
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that the function vmw_resource_lookup will
 * find the resource.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}
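
/*
 * Illustrative sketch (an assumption, not driver code): a resource type
 * typically pairs vmw_resource_init() with vmw_resource_activate() once
 * the device knows about the object. The names "my_res", "my_res_free",
 * "my_res_func" and "my_hw_destroy" below are hypothetical; compare
 * vmw_stream_init() further down for the real pattern.
 *
 *      ret = vmw_resource_init(dev_priv, &my_res->res, false,
 *                              my_res_free, &my_res_func);
 *      if (ret)
 *              return ret;
 *      ...issue device commands to create the hardware object...
 *      vmw_resource_activate(&my_res->res, my_hw_destroy);
 *
 * Until activation, res->avail is false and vmw_resource_lookup() will
 * refuse to hand out references, so a half-constructed resource cannot
 * leak to user space.
 */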

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
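
/*
 * Illustrative sketch (hypothetical caller, not driver code): an ioctl
 * handler resolving a user-space surface handle would pass the converter
 * for that type and drop the reference when done. "user_surface_converter"
 * is the same converter vmw_user_lookup_handle() below uses.
 *
 *      struct vmw_resource *res;
 *      int ret;
 *
 *      ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                            user_surface_converter, &res);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ...operate on res...
 *      vmw_resource_unreference(&res);
 */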

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/**
 * Buffer management.
 */
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, bo_free);
        return ret;
}
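
/*
 * Illustrative sketch (hypothetical, not driver code): a kernel-internal
 * buffer that needs no user-space handle pairs vmw_dmabuf_init() with the
 * plain vmw_dmabuf_bo_free() destructor, mirroring what
 * vmw_resource_buf_alloc() below does for backup buffers.
 *
 *      struct vmw_dma_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ret = vmw_dmabuf_init(dev_priv, buf, size,
 *                            &vmw_vram_sys_placement, true,
 *                            &vmw_dmabuf_bo_free);
 *
 * The error paths in this file suggest that on failure ttm_bo_init() has
 * already invoked the destructor, so "buf" must not be freed again here
 * (an assumption; vmw_resource_buf_alloc() follows the same pattern).
 */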

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_base_object_kfree(vmw_user_bo, base);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_base_object_init(tfile,
                                   &user_bo->base,
                                   shareable,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->base.hash.key;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);
        return (vmw_user_bo->base.tfile == tfile ||
                vmw_user_bo->base.shareable) ? 0 : -EPERM;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = dma_buf->base.addr_space_offset;
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}
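
/*
 * Illustrative sketch (hypothetical caller, not driver code): the lookup
 * returns with one ttm_bo reference held on behalf of the caller, so every
 * successful lookup must be balanced by vmw_dmabuf_unreference(), as
 * vmw_dumb_map_offset() below also demonstrates.
 *
 *      struct vmw_dma_buffer *buf = NULL;
 *
 *      ret = vmw_user_dmabuf_lookup(tfile, handle, &buf);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ...use buf...
 *      vmw_dmabuf_unreference(&buf);
 */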

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
        return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;


        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }


        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}


int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
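
        /*
         * Worked example (illustrative): (bpp + 7) / 8 rounds bits per
         * pixel up to whole bytes, so for width = 300 and bpp = 24 the
         * pitch is 300 * 3 = 900 bytes, while bpp = 15 also occupies
         * two bytes per pixel, giving 300 * 2 = 600 bytes. The buffer
         * size is then simply pitch * height.
         */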

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (vmw_user_bo == NULL)
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (ret != 0) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (ret != 0)
                goto out_no_dmabuf;

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;

        args->handle = vmw_user_bo->base.hash.key;

out_no_base_object:
        ttm_bo_unref(&tmp);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = out_buf->base.addr_space_offset;
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
            (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
int vmw_resource_check_buffer(struct vmw_resource *res,
                              bool interruptible,
                              struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(&val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 * @no_backup: If true, skip backup buffer allocation for now.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}
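
/*
 * Illustrative sketch (an assumption about the intended calling sequence,
 * not code from this driver): command submission brackets resource use
 * with reserve / validate / unreserve, with the backup buffer reserved
 * in between:
 *
 *      ret = vmw_resource_reserve(res, false);
 *      ...reserve and validate res->backup, e.g. via
 *         vmw_resource_check_buffer()...
 *      ret = vmw_resource_validate(res);
 *      ...submit commands, fence the buffers...
 *      vmw_resource_unreserve(res, NULL, 0);
 *
 * Unreserving puts an evictable resource back on its type's LRU list,
 * so it becomes a candidate for eviction again.
 */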

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(&val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 */
int vmw_resource_do_evict(struct vmw_resource *res)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        ret = vmw_resource_check_buffer(res, true, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device id entries "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);
                vmw_resource_do_evict(evict_res);
                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct vmw_fence_obj *old_fence_obj;
        struct vmw_private *dev_priv =
            container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                driver->sync_obj_ref(fence);

        spin_lock(&bdev->fence_lock);

        old_fence_obj = bo->sync_obj;
        bo->sync_obj = fence;

        spin_unlock(&bdev->fence_lock);

        if (old_fence_obj)
                vmw_fence_obj_unreference(&old_fence_obj);
}
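
/*
 * Illustrative sketch (hypothetical caller, not driver code): since
 * vmw_fence_single_bo() neither reserves nor unreserves, a caller
 * fencing one buffer after submitting commands would typically do:
 *
 *      ttm_bo_reserve(bo, false, false, false, 0);
 *      ...submit commands touching bo...
 *      vmw_fence_single_bo(bo, fence);  // fence == NULL inserts one
 *      ttm_bo_unreserve(bo);
 *
 * Passing a NULL fence asks vmw_execbuf_fence_commands() to create a
 * fence and insert it into the command stream on the caller's behalf.
 */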

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);
                vmw_resource_do_evict(evict_res);
                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}