1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3 *
4 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 #include <drm/ttm/ttm_placement.h>
30
31 #include "vmwgfx_drv.h"
32 #include "ttm_object.h"
33
34
35 /**
36 * struct vmw_user_buffer_object - User-space-visible buffer object
37 *
38 * @prime: The prime object providing user visibility.
39 * @vbo: The struct vmw_buffer_object
40 */
41 struct vmw_user_buffer_object {
42 struct ttm_prime_object prime;
43 struct vmw_buffer_object vbo;
44 };
45
46
47 /**
48 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
49 * vmw_buffer_object.
50 *
51 * @bo: Pointer to the TTM buffer object.
52 * Return: Pointer to the struct vmw_buffer_object embedding the
53 * TTM buffer object.
54 */
55 static struct vmw_buffer_object *
56 vmw_buffer_object(struct ttm_buffer_object *bo)
57 {
58 return container_of(bo, struct vmw_buffer_object, base);
59 }
60
61
62 /**
63 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
64 * vmw_user_buffer_object.
65 *
66 * @bo: Pointer to the TTM buffer object.
67 * Return: Pointer to the struct vmw_user_buffer_object enclosing the
68 * TTM buffer object.
69 */
70 static struct vmw_user_buffer_object *
71 vmw_user_buffer_object(struct ttm_buffer_object *bo)
72 {
73 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
74
75 return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
76 }
77
78
79 /**
80 * vmw_bo_pin_in_placement - Validate a buffer to placement.
81 *
82 * @dev_priv: Driver private.
83 * @buf: DMA buffer to move.
84 * @placement: The placement to pin it in.
85 * @interruptible: Use interruptible wait.
86 * Return: Zero on success, negative error code on failure. In particular,
87 * -ERESTARTSYS if interrupted by a signal.
88 */
89 int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
90 struct vmw_buffer_object *buf,
91 struct ttm_placement *placement,
92 bool interruptible)
93 {
94 struct ttm_operation_ctx ctx = {interruptible, false };
95 struct ttm_buffer_object *bo = &buf->base;
96 int ret;
97 uint32_t new_flags;
98
99 vmw_execbuf_release_pinned_bo(dev_priv);
100
101 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
102 if (unlikely(ret != 0))
103 goto err;
104
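	/* An already-pinned BO must not be moved; only check that its
	 * current placement is already compatible.
	 */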
105 if (buf->base.pin_count > 0)
106 ret = ttm_bo_mem_compat(placement, bo->resource,
107 &new_flags) == true ? 0 : -EINVAL;
108 else
109 ret = ttm_bo_validate(bo, placement, &ctx);
110
111 if (!ret)
112 vmw_bo_pin_reserved(buf, true);
113
114 ttm_bo_unreserve(bo);
115 err:
116 return ret;
117 }
118
119
120 /**
121 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
122 *
123 * This function takes the reservation_sem in write mode.
124 * Flushes and unpins the query bo to avoid failures.
125 *
126 * @dev_priv: Driver private.
127 * @buf: DMA buffer to move.
128 * @interruptible: Use interruptible wait.
129 * Return: Zero on success, negative error code on failure. In particular,
130 * -ERESTARTSYS if interrupted by a signal.
131 */
132 int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
133 struct vmw_buffer_object *buf,
134 bool interruptible)
135 {
136 struct ttm_operation_ctx ctx = {interruptible, false };
137 struct ttm_buffer_object *bo = &buf->base;
138 int ret;
139 uint32_t new_flags;
140
141 vmw_execbuf_release_pinned_bo(dev_priv);
142
143 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
144 if (unlikely(ret != 0))
145 goto err;
146
147 if (buf->base.pin_count > 0) {
148 ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
149 &new_flags) == true ? 0 : -EINVAL;
150 goto out_unreserve;
151 }
152
153 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
154 if (likely(ret == 0) || ret == -ERESTARTSYS)
155 goto out_unreserve;
156
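	/* Could not satisfy VRAM-or-GMR placement; retry with VRAM only. */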
157 ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
158
159 out_unreserve:
160 if (!ret)
161 vmw_bo_pin_reserved(buf, true);
162
163 ttm_bo_unreserve(bo);
164 err:
165 return ret;
166 }
167
168
169 /**
170 * vmw_bo_pin_in_vram - Move a buffer to vram.
171 *
172 * This function takes the reservation_sem in write mode.
173 * Flushes and unpins the query bo to avoid failures.
174 *
175 * @dev_priv: Driver private.
176 * @buf: DMA buffer to move.
177 * @interruptible: Use interruptible wait.
178 * Return: Zero on success, negative error code on failure. In particular,
179 * -ERESTARTSYS if interrupted by a signal.
180 */
181 int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
182 struct vmw_buffer_object *buf,
183 bool interruptible)
184 {
185 return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
186 interruptible);
187 }
188
189
190 /**
191 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
192 *
193 * This function takes the reservation_sem in write mode.
194 * Flushes and unpins the query bo to avoid failures.
195 *
196 * @dev_priv: Driver private.
197 * @buf: DMA buffer to pin.
198 * @interruptible: Use interruptible wait.
199 * Return: Zero on success, negative error code on failure. In particular,
200 * -ERESTARTSYS if interrupted by a signal.
201 */
202 int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
203 struct vmw_buffer_object *buf,
204 bool interruptible)
205 {
206 struct ttm_operation_ctx ctx = {interruptible, false };
207 struct ttm_buffer_object *bo = &buf->base;
208 struct ttm_placement placement;
209 struct ttm_place place;
210 int ret = 0;
211 uint32_t new_flags;
212
213 place = vmw_vram_placement.placement[0];
214 place.lpfn = bo->resource->num_pages;
215 placement.num_placement = 1;
216 placement.placement = &place;
217 placement.num_busy_placement = 1;
218 placement.busy_placement = &place;
219
220 vmw_execbuf_release_pinned_bo(dev_priv);
221 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
222 if (unlikely(ret != 0))
223 goto err_unlock;
224
225 /*
226 * Is this buffer already in vram but not at the start of it?
227 * In that case, evict it first because TTM isn't good at handling
228 * that situation.
229 */
230 if (bo->resource->mem_type == TTM_PL_VRAM &&
231 bo->resource->start < bo->resource->num_pages &&
232 bo->resource->start > 0 &&
233 buf->base.pin_count == 0) {
234 ctx.interruptible = false;
235 (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
236 }
237
238 if (buf->base.pin_count > 0)
239 ret = ttm_bo_mem_compat(&placement, bo->resource,
240 &new_flags) == true ? 0 : -EINVAL;
241 else
242 ret = ttm_bo_validate(bo, &placement, &ctx);
243
244 /* For some reason we didn't end up at the start of vram */
245 WARN_ON(ret == 0 && bo->resource->start != 0);
246 if (!ret)
247 vmw_bo_pin_reserved(buf, true);
248
249 ttm_bo_unreserve(bo);
250 err_unlock:
251
252 return ret;
253 }
254
255
256 /**
257 * vmw_bo_unpin - Unpin the given buffer without moving it.
258 *
259 * This function takes the reservation_sem in write mode.
260 *
261 * @dev_priv: Driver private.
262 * @buf: DMA buffer to unpin.
263 * @interruptible: Use interruptible wait.
264 * Return: Zero on success, negative error code on failure. In particular,
265 * -ERESTARTSYS if interrupted by a signal.
266 */
267 int vmw_bo_unpin(struct vmw_private *dev_priv,
268 struct vmw_buffer_object *buf,
269 bool interruptible)
270 {
271 struct ttm_buffer_object *bo = &buf->base;
272 int ret;
273
274 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
275 if (unlikely(ret != 0))
276 goto err;
277
278 vmw_bo_pin_reserved(buf, false);
279
280 ttm_bo_unreserve(bo);
281
282 err:
283 return ret;
284 }
285
286 /**
287 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
288 * of a buffer.
289 *
290 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
291 * @ptr: SVGAGuestPtr returning the result.
292 */
293 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
294 SVGAGuestPtr *ptr)
295 {
296 if (bo->resource->mem_type == TTM_PL_VRAM) {
297 ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
298 ptr->offset = bo->resource->start << PAGE_SHIFT;
299 } else {
300 ptr->gmrId = bo->resource->start;
301 ptr->offset = 0;
302 }
303 }
304
305
306 /**
307 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
308 *
309 * @vbo: The buffer object. Must be reserved.
310 * @pin: Whether to pin or unpin.
311 *
312 */
313 void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
314 {
315 struct ttm_operation_ctx ctx = { false, true };
316 struct ttm_place pl;
317 struct ttm_placement placement;
318 struct ttm_buffer_object *bo = &vbo->base;
319 uint32_t old_mem_type = bo->resource->mem_type;
320 int ret;
321
322 dma_resv_assert_held(bo->base.resv);
323
324 if (pin == !!bo->pin_count)
325 return;
326
327 pl.fpfn = 0;
328 pl.lpfn = 0;
329 pl.mem_type = bo->resource->mem_type;
330 pl.flags = bo->resource->placement;
331
332 memset(&placement, 0, sizeof(placement));
333 placement.num_placement = 1;
334 placement.placement = &pl;
335
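	/* Re-validate against the current placement only; this is not
	 * expected to move the buffer (see the BUG_ON below).
	 */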
336 ret = ttm_bo_validate(bo, &placement, &ctx);
337
338 BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
339
340 if (pin)
341 ttm_bo_pin(bo);
342 else
343 ttm_bo_unpin(bo);
344 }
345
346 /**
347 * vmw_bo_map_and_cache - Map a buffer object and cache the map
348 *
349 * @vbo: The buffer object to map
350 * Return: A kernel virtual address or NULL if mapping failed.
351 *
352 * This function maps a buffer object into the kernel address space, or
353 * returns the virtual kernel address of an already existing map. The virtual
354 * address remains valid as long as the buffer object is pinned or reserved.
355 * The cached map is torn down on either
356 * 1) Buffer object move
357 * 2) Buffer object swapout
358 * 3) Buffer object destruction
359 *
360 */
361 void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
362 {
363 struct ttm_buffer_object *bo = &vbo->base;
364 bool not_used;
365 void *virtual;
366 int ret;
367
368 virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used);
369 if (virtual)
370 return virtual;
371
372 ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
373 if (ret)
374 DRM_ERROR("Buffer object map failed: %d.\n", ret);
375
376 return ttm_kmap_obj_virtual(&vbo->map, ¬_used);
377 }
378
379
380 /**
381 * vmw_bo_unmap - Tear down a cached buffer object map.
382 *
383 * @vbo: The buffer object whose map we are tearing down.
384 *
385 * This function tears down a cached map set up using
386 * vmw_bo_map_and_cache().
387 */
388 void vmw_bo_unmap(struct vmw_buffer_object *vbo)
389 {
390 if (vbo->map.bo == NULL)
391 return;
392
393 ttm_bo_kunmap(&vbo->map);
394 }
395
396
397 /**
398 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
399 *
400 * @dev_priv: Pointer to a struct vmw_private identifying the device.
401 * @size: The requested buffer size.
402 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
403 */
404 static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
405 bool user)
406 {
407 static size_t struct_size, user_struct_size;
408 size_t num_pages = PFN_UP(size);
409 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
410
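	/* The fixed per-object overhead depends only on struct sizes,
	 * so compute it lazily once and reuse it.
	 */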
411 if (unlikely(struct_size == 0)) {
412 size_t backend_size = ttm_round_pot(vmw_tt_size);
413
414 struct_size = backend_size +
415 ttm_round_pot(sizeof(struct vmw_buffer_object));
416 user_struct_size = backend_size +
417 ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
418 TTM_OBJ_EXTRA_SIZE;
419 }
420
421 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
422 page_array_size +=
423 ttm_round_pot(num_pages * sizeof(dma_addr_t));
424
425 return ((user) ? user_struct_size : struct_size) +
426 page_array_size;
427 }
428
429
430 /**
431 * vmw_bo_bo_free - vmw buffer object destructor
432 *
433 * @bo: Pointer to the embedded struct ttm_buffer_object
434 */
435 void vmw_bo_bo_free(struct ttm_buffer_object *bo)
436 {
437 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
438
439 WARN_ON(vmw_bo->dirty);
440 WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
441 vmw_bo_unmap(vmw_bo);
442 dma_resv_fini(&bo->base._resv);
443 kfree(vmw_bo);
444 }
445
446
447 /**
448 * vmw_user_bo_destroy - vmw buffer object destructor
449 *
450 * @bo: Pointer to the embedded struct ttm_buffer_object
451 */
452 static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
453 {
454 struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
455 struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
456
457 WARN_ON(vbo->dirty);
458 WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
459 vmw_bo_unmap(vbo);
460 ttm_prime_object_kfree(vmw_user_bo, prime);
461 }
462
463 /**
464 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
465 *
466 * @dev_priv: Pointer to the device private struct
467 * @size: size of the BO we need
468 * @placement: where to put it
469 * @p_bo: resulting BO
470 *
471 * Creates and pins a simple BO for in-kernel use.
472 */
473 int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
474 struct ttm_placement *placement,
475 struct ttm_buffer_object **p_bo)
476 {
477 struct ttm_operation_ctx ctx = { false, false };
478 struct ttm_buffer_object *bo;
479 size_t acc_size;
480 int ret;
481
482 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
483 if (unlikely(!bo))
484 return -ENOMEM;
485
486 acc_size = ttm_round_pot(sizeof(*bo));
487 acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
488 acc_size += ttm_round_pot(sizeof(struct ttm_tt));
489
490 ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
491 if (unlikely(ret))
492 goto error_free;
493
494
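	/* Set up the embedded base object fields by hand before handing
	 * the BO to TTM for initialization.
	 */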
495 bo->base.size = size;
496 dma_resv_init(&bo->base._resv);
497 drm_vma_node_reset(&bo->base.vma_node);
498
499 ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
500 ttm_bo_type_device, placement, 0,
501 &ctx, NULL, NULL, NULL);
502 if (unlikely(ret))
503 goto error_account;
504
505 ttm_bo_pin(bo);
506 ttm_bo_unreserve(bo);
507 *p_bo = bo;
508
509 return 0;
510
511 error_account:
512 ttm_mem_global_free(&ttm_mem_glob, acc_size);
513
514 error_free:
515 kfree(bo);
516 return ret;
517 }
518
519 /**
520 * vmw_bo_init - Initialize a vmw buffer object
521 *
522 * @dev_priv: Pointer to the device private struct
523 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
524 * @size: Buffer object size in bytes.
525 * @placement: Initial placement.
526 * @interruptible: Whether waits should be performed interruptible.
527 * @pin: If the BO should be created pinned at a fixed location.
528 * @bo_free: The buffer object destructor.
529 * Returns: Zero on success, negative error code on error.
530 *
531 * Note that on error, the code will free the buffer object.
532 */
533 int vmw_bo_init(struct vmw_private *dev_priv,
534 struct vmw_buffer_object *vmw_bo,
535 size_t size, struct ttm_placement *placement,
536 bool interruptible, bool pin,
537 void (*bo_free)(struct ttm_buffer_object *bo))
538 {
539 struct ttm_operation_ctx ctx = { interruptible, false };
540 struct ttm_device *bdev = &dev_priv->bdev;
541 size_t acc_size;
542 int ret;
543 bool user = (bo_free == &vmw_user_bo_destroy);
544
545 WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
546
547 acc_size = vmw_bo_acc_size(dev_priv, size, user);
548 memset(vmw_bo, 0, sizeof(*vmw_bo));
549 BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
550 vmw_bo->base.priority = 3;
551 vmw_bo->res_tree = RB_ROOT;
552
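	/* Account for the memory up front; it is freed again below if
	 * TTM initialization fails.
	 */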
553 ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
554 if (unlikely(ret))
555 return ret;
556
557 vmw_bo->base.base.size = size;
558 dma_resv_init(&vmw_bo->base.base._resv);
559 drm_vma_node_reset(&vmw_bo->base.base.vma_node);
560
561 ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
562 ttm_bo_type_device, placement,
563 0, &ctx, NULL, NULL, bo_free);
564 if (unlikely(ret)) {
565 ttm_mem_global_free(&ttm_mem_glob, acc_size);
566 return ret;
567 }
568
569 if (pin)
570 ttm_bo_pin(&vmw_bo->base);
571 ttm_bo_unreserve(&vmw_bo->base);
572 return 0;
573 }
574
575
576 /**
577 * vmw_user_bo_release - TTM reference base object release callback for
578 * vmw user buffer objects
579 *
580 * @p_base: The TTM base object pointer about to be unreferenced.
581 *
582 * Clears the TTM base object pointer and drops the reference the
583 * base object has on the underlying struct vmw_buffer_object.
584 */
585 static void vmw_user_bo_release(struct ttm_base_object **p_base)
586 {
587 struct vmw_user_buffer_object *vmw_user_bo;
588 struct ttm_base_object *base = *p_base;
589
590 *p_base = NULL;
591
592 if (unlikely(base == NULL))
593 return;
594
595 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
596 prime.base);
597 ttm_bo_put(&vmw_user_bo->vbo.base);
598 }
599
600
601 /**
602 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
603 * for vmw user buffer objects
604 *
605 * @base: Pointer to the TTM base object
606 * @ref_type: Reference type of the reference reaching zero.
607 *
608 * Called when user-space drops its last synccpu reference on the buffer
609 * object, either explicitly or as part of a cleanup file close.
610 */
611 static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
612 enum ttm_ref_type ref_type)
613 {
614 struct vmw_user_buffer_object *user_bo;
615
616 user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
617
618 switch (ref_type) {
619 case TTM_REF_SYNCCPU_WRITE:
620 atomic_dec(&user_bo->vbo.cpu_writers);
621 break;
622 default:
623 WARN_ONCE(true, "Undefined buffer object reference release.\n");
624 }
625 }
626
627
628 /**
629 * vmw_user_bo_alloc - Allocate a user buffer object
630 *
631 * @dev_priv: Pointer to a struct device private.
632 * @tfile: Pointer to a struct ttm_object_file on which to register the user
633 * object.
634 * @size: Size of the buffer object.
635 * @shareable: Boolean whether the buffer is shareable with other open files.
636 * @handle: Pointer to where the handle value should be assigned.
637 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
638 * should be assigned.
639 * @p_base: The TTM base object pointer about to be allocated.
640 * Return: Zero on success, negative error code on error.
641 */
642 int vmw_user_bo_alloc(struct vmw_private *dev_priv,
643 struct ttm_object_file *tfile,
644 uint32_t size,
645 bool shareable,
646 uint32_t *handle,
647 struct vmw_buffer_object **p_vbo,
648 struct ttm_base_object **p_base)
649 {
650 struct vmw_user_buffer_object *user_bo;
651 int ret;
652
653 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
654 if (unlikely(!user_bo)) {
655 DRM_ERROR("Failed to allocate a buffer.\n");
656 return -ENOMEM;
657 }
658
659 ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
660 (dev_priv->has_mob) ?
661 &vmw_sys_placement :
662 &vmw_vram_sys_placement, true, false,
663 &vmw_user_bo_destroy);
664 if (unlikely(ret != 0))
665 return ret;
666
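	/* Take a reference for the base object; it is dropped again in
	 * vmw_user_bo_release() or on prime object init failure below.
	 */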
667 ttm_bo_get(&user_bo->vbo.base);
668 ret = ttm_prime_object_init(tfile,
669 size,
670 &user_bo->prime,
671 shareable,
672 ttm_buffer_type,
673 &vmw_user_bo_release,
674 &vmw_user_bo_ref_obj_release);
675 if (unlikely(ret != 0)) {
676 ttm_bo_put(&user_bo->vbo.base);
677 goto out_no_base_object;
678 }
679
680 *p_vbo = &user_bo->vbo;
681 if (p_base) {
682 *p_base = &user_bo->prime.base;
683 kref_get(&(*p_base)->refcount);
684 }
685 *handle = user_bo->prime.base.handle;
686
687 out_no_base_object:
688 return ret;
689 }
690
691
692 /**
693 * vmw_user_bo_verify_access - verify access permissions on this
694 * buffer object.
695 *
696 * @bo: Pointer to the buffer object being accessed
697 * @tfile: Identifying the caller.
698 */
699 int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
700 struct ttm_object_file *tfile)
701 {
702 struct vmw_user_buffer_object *vmw_user_bo;
703
704 if (unlikely(bo->destroy != vmw_user_bo_destroy))
705 return -EPERM;
706
707 vmw_user_bo = vmw_user_buffer_object(bo);
708
709 /* Check that the caller has opened the object. */
710 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
711 return 0;
712
713 DRM_ERROR("Could not grant buffer access.\n");
714 return -EPERM;
715 }
716
717
718 /**
719 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
720 * access, idling previous GPU operations on the buffer and optionally
721 * blocking it for further command submissions.
722 *
723 * @user_bo: Pointer to the buffer object being grabbed for CPU access
724 * @tfile: Identifying the caller.
725 * @flags: Flags indicating how the grab should be performed.
726 * Return: Zero on success, negative error code on error. In particular,
727 * -EBUSY will be returned if a dontblock operation is requested and the
728 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
729 * interrupted by a signal.
730 *
731 * A blocking grab will be automatically released when @tfile is closed.
732 */
733 static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
734 struct ttm_object_file *tfile,
735 uint32_t flags)
736 {
737 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
738 struct ttm_buffer_object *bo = &user_bo->vbo.base;
739 bool existed;
740 int ret;
741
742 if (flags & drm_vmw_synccpu_allow_cs) {
743 long lret;
744
745 lret = dma_resv_wait_timeout(bo->base.resv, true, true,
746 nonblock ? 0 :
747 MAX_SCHEDULE_TIMEOUT);
748 if (!lret)
749 return -EBUSY;
750 else if (lret < 0)
751 return lret;
752 return 0;
753 }
754
755 ret = ttm_bo_reserve(bo, true, nonblock, NULL);
756 if (unlikely(ret != 0))
757 return ret;
758
759 ret = ttm_bo_wait(bo, true, nonblock);
760 if (likely(ret == 0))
761 atomic_inc(&user_bo->vbo.cpu_writers);
762
763 ttm_bo_unreserve(bo);
764 if (unlikely(ret != 0))
765 return ret;
766
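	/* The synccpu write reference keeps the buffer blocked until it
	 * is released explicitly or @tfile is closed.
	 */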
767 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
768 TTM_REF_SYNCCPU_WRITE, &existed, false);
769 if (ret != 0 || existed)
770 atomic_dec(&user_bo->vbo.cpu_writers);
771
772 return ret;
773 }
774
775 /**
776 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
777 * and unblock command submission on the buffer if blocked.
778 *
779 * @handle: Handle identifying the buffer object.
780 * @tfile: Identifying the caller.
781 * @flags: Flags indicating the type of release.
782 */
783 static int vmw_user_bo_synccpu_release(uint32_t handle,
784 struct ttm_object_file *tfile,
785 uint32_t flags)
786 {
787 if (!(flags & drm_vmw_synccpu_allow_cs))
788 return ttm_ref_object_base_unref(tfile, handle,
789 TTM_REF_SYNCCPU_WRITE);
790
791 return 0;
792 }
793
794
795 /**
796 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
797 * functionality.
798 *
799 * @dev: Identifies the drm device.
800 * @data: Pointer to the ioctl argument.
801 * @file_priv: Identifies the caller.
802 * Return: Zero on success, negative error code on error.
803 *
804 * This function checks the ioctl arguments for validity and calls the
805 * relevant synccpu functions.
806 */
807 int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
808 struct drm_file *file_priv)
809 {
810 struct drm_vmw_synccpu_arg *arg =
811 (struct drm_vmw_synccpu_arg *) data;
812 struct vmw_buffer_object *vbo;
813 struct vmw_user_buffer_object *user_bo;
814 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
815 struct ttm_base_object *buffer_base;
816 int ret;
817
818 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
819 || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
820 drm_vmw_synccpu_dontblock |
821 drm_vmw_synccpu_allow_cs)) != 0) {
822 DRM_ERROR("Illegal synccpu flags.\n");
823 return -EINVAL;
824 }
825
826 switch (arg->op) {
827 case drm_vmw_synccpu_grab:
828 ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
829 &buffer_base);
830 if (unlikely(ret != 0))
831 return ret;
832
833 user_bo = container_of(vbo, struct vmw_user_buffer_object,
834 vbo);
835 ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
836 vmw_bo_unreference(&vbo);
837 ttm_base_object_unref(&buffer_base);
838 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
839 ret != -EBUSY)) {
840 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
841 (unsigned int) arg->handle);
842 return ret;
843 }
844 break;
845 case drm_vmw_synccpu_release:
846 ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
847 arg->flags);
848 if (unlikely(ret != 0)) {
849 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
850 (unsigned int) arg->handle);
851 return ret;
852 }
853 break;
854 default:
855 DRM_ERROR("Invalid synccpu operation.\n");
856 return -EINVAL;
857 }
858
859 return 0;
860 }
861
862
863 /**
864 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
865 * allocation functionality.
866 *
867 * @dev: Identifies the drm device.
868 * @data: Pointer to the ioctl argument.
869 * @file_priv: Identifies the caller.
870 * Return: Zero on success, negative error code on error.
871 *
872 * This function checks the ioctl arguments for validity and allocates a
873 * struct vmw_user_buffer_object bo.
874 */
875 int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
876 struct drm_file *file_priv)
877 {
878 struct vmw_private *dev_priv = vmw_priv(dev);
879 union drm_vmw_alloc_dmabuf_arg *arg =
880 (union drm_vmw_alloc_dmabuf_arg *)data;
881 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
882 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
883 struct vmw_buffer_object *vbo;
884 uint32_t handle;
885 int ret;
886
887 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
888 req->size, false, &handle, &vbo,
889 NULL);
890 if (unlikely(ret != 0))
891 goto out_no_bo;
892
893 rep->handle = handle;
894 rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
895 rep->cur_gmr_id = handle;
896 rep->cur_gmr_offset = 0;
897
898 vmw_bo_unreference(&vbo);
899
900 out_no_bo:
901
902 return ret;
903 }
904
905
906 /**
907 * vmw_bo_unref_ioctl - Generic handle close ioctl.
908 *
909 * @dev: Identifies the drm device.
910 * @data: Pointer to the ioctl argument.
911 * @file_priv: Identifies the caller.
912 * Return: Zero on success, negative error code on error.
913 *
914 * This function checks the ioctl arguments for validity and closes a
915 * handle to a TTM base object, optionally freeing the object.
916 */
917 int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
918 struct drm_file *file_priv)
919 {
920 struct drm_vmw_unref_dmabuf_arg *arg =
921 (struct drm_vmw_unref_dmabuf_arg *)data;
922
923 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
924 arg->handle,
925 TTM_REF_USAGE);
926 }
927
928
929 /**
930 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
931 *
932 * @tfile: The TTM object file the handle is registered with.
933 * @handle: The user buffer object handle
934 * @out: Pointer to where a pointer to the embedded
935 * struct vmw_buffer_object should be placed.
936 * @p_base: Pointer to where a pointer to the TTM base object should be
937 * placed, or NULL if no such pointer is required.
938 * Return: Zero on success, negative error code on error.
939 *
940 * Both the output base object pointer and the vmw buffer object pointer
941 * will be refcounted.
942 */
943 int vmw_user_bo_lookup(struct ttm_object_file *tfile,
944 uint32_t handle, struct vmw_buffer_object **out,
945 struct ttm_base_object **p_base)
946 {
947 struct vmw_user_buffer_object *vmw_user_bo;
948 struct ttm_base_object *base;
949
950 base = ttm_base_object_lookup(tfile, handle);
951 if (unlikely(base == NULL)) {
952 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
953 (unsigned long)handle);
954 return -ESRCH;
955 }
956
957 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
958 ttm_base_object_unref(&base);
959 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
960 (unsigned long)handle);
961 return -EINVAL;
962 }
963
964 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
965 prime.base);
966 ttm_bo_get(&vmw_user_bo->vbo.base);
967 if (p_base)
968 *p_base = base;
969 else
970 ttm_base_object_unref(&base);
971 *out = &vmw_user_bo->vbo;
972
973 return 0;
974 }
975
976 /**
977 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
978 * @tfile: The TTM object file the handle is registered with.
979 * @handle: The user buffer object handle.
980 *
981 * This function looks up a struct vmw_user_bo and returns a pointer to the
982 * struct vmw_buffer_object it derives from without refcounting the pointer.
983 * The returned pointer is only valid until vmw_user_bo_noref_release() is
984 * called, and the object pointed to by the returned pointer may be doomed.
985 * Any persistent usage of the object requires a refcount to be taken using
986 * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
987 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping
988 * or scheduling functions may be called in between these function calls.
989 *
990 * Return: A struct vmw_buffer_object pointer if successful or negative
991 * error pointer on failure.
992 */
993 struct vmw_buffer_object *
994 vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
995 {
996 struct vmw_user_buffer_object *vmw_user_bo;
997 struct ttm_base_object *base;
998
999 base = ttm_base_object_noref_lookup(tfile, handle);
1000 if (!base) {
1001 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
1002 (unsigned long)handle);
1003 return ERR_PTR(-ESRCH);
1004 }
1005
1006 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
1007 ttm_base_object_noref_release();
1008 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
1009 (unsigned long)handle);
1010 return ERR_PTR(-EINVAL);
1011 }
1012
1013 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
1014 prime.base);
1015 return &vmw_user_bo->vbo;
1016 }
1017
1018 /**
1019 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
1020 *
1021 * @tfile: The TTM object file to register the handle with.
1022 * @vbo: The embedded vmw buffer object.
1023 * @handle: Pointer to where the new handle should be placed.
1024 * Return: Zero on success, negative error code on error.
1025 */
1026 int vmw_user_bo_reference(struct ttm_object_file *tfile,
1027 struct vmw_buffer_object *vbo,
1028 uint32_t *handle)
1029 {
1030 struct vmw_user_buffer_object *user_bo;
1031
1032 if (vbo->base.destroy != vmw_user_bo_destroy)
1033 return -EINVAL;
1034
1035 user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
1036
1037 *handle = user_bo->prime.base.handle;
1038 return ttm_ref_object_add(tfile, &user_bo->prime.base,
1039 TTM_REF_USAGE, NULL, false);
1040 }
1041
1042
1043 /**
1044 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
1045 * object without unreserving it.
1046 *
1047 * @bo: Pointer to the struct ttm_buffer_object to fence.
1048 * @fence: Pointer to the fence. If NULL, this function will
1049 * insert a fence into the command stream.
1050 *
1051 * Contrary to the ttm_eu version of this function, it takes only
1052 * a single buffer object instead of a list, and it also doesn't
1053 * unreserve the buffer object, which needs to be done separately.
1054 */
1055 void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1056 struct vmw_fence_obj *fence)
1057 {
1058 struct ttm_device *bdev = bo->bdev;
1059
1060 struct vmw_private *dev_priv =
1061 container_of(bdev, struct vmw_private, bdev);
1062
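	/* If no fence was supplied, emit one now and drop the local
	 * reference once it has been attached to the reservation object.
	 */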
1063 if (fence == NULL) {
1064 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1065 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1066 dma_fence_put(&fence->base);
1067 } else
1068 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1069 }
1070
1071
1072 /**
1073 * vmw_dumb_create - Create a dumb kms buffer
1074 *
1075 * @file_priv: Pointer to a struct drm_file identifying the caller.
1076 * @dev: Pointer to the drm device.
1077 * @args: Pointer to a struct drm_mode_create_dumb structure
1078 * Return: Zero on success, negative error code on failure.
1079 *
1080 * This is a driver callback for the core drm create_dumb functionality.
1081 * Note that this is very similar to the vmw_bo_alloc ioctl, except
1082 * that the arguments have a different format.
1083 */
1084 int vmw_dumb_create(struct drm_file *file_priv,
1085 struct drm_device *dev,
1086 struct drm_mode_create_dumb *args)
1087 {
1088 struct vmw_private *dev_priv = vmw_priv(dev);
1089 struct vmw_buffer_object *vbo;
1090 int ret;
1091
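	/* Tightly packed pitch: bytes per pixel rounded up, times width. */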
1092 args->pitch = args->width * ((args->bpp + 7) / 8);
1093 args->size = args->pitch * args->height;
1094
1095 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1096 args->size, false, &args->handle,
1097 &vbo, NULL);
1098 if (unlikely(ret != 0))
1099 goto out_no_bo;
1100
1101 vmw_bo_unreference(&vbo);
1102 out_no_bo:
1103 return ret;
1104 }
1105
1106
1107 /**
1108 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1109 *
1110 * @file_priv: Pointer to a struct drm_file identifying the caller.
1111 * @dev: Pointer to the drm device.
1112 * @handle: Handle identifying the dumb buffer.
1113 * @offset: The address space offset returned.
1114 * Return: Zero on success, negative error code on failure.
1115 *
1116 * This is a driver callback for the core drm dumb_map_offset functionality.
1117 */
1118 int vmw_dumb_map_offset(struct drm_file *file_priv,
1119 struct drm_device *dev, uint32_t handle,
1120 uint64_t *offset)
1121 {
1122 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1123 struct vmw_buffer_object *out_buf;
1124 int ret;
1125
1126 ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1127 if (ret != 0)
1128 return -EINVAL;
1129
1130 *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
1131 vmw_bo_unreference(&out_buf);
1132 return 0;
1133 }
1134
1135
1136 /**
1137 * vmw_dumb_destroy - Destroy a dumb buffer
1138 *
1139 * @file_priv: Pointer to a struct drm_file identifying the caller.
1140 * @dev: Pointer to the drm device.
1141 * @handle: Handle identifying the dumb buffer.
1142 * Return: Zero on success, negative error code on failure.
1143 *
1144 * This is a driver callback for the core drm dumb_destroy functionality.
1145 */
1146 int vmw_dumb_destroy(struct drm_file *file_priv,
1147 struct drm_device *dev,
1148 uint32_t handle)
1149 {
1150 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1151 handle, TTM_REF_USAGE);
1152 }
1153
1154
1155 /**
1156 * vmw_bo_swap_notify - swapout notify callback.
1157 *
1158 * @bo: The buffer object to be swapped out.
1159 */
1160 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1161 {
1162 /* Is @bo embedded in a struct vmw_buffer_object? */
1163 if (bo->destroy != vmw_bo_bo_free &&
1164 bo->destroy != vmw_user_bo_destroy)
1165 return;
1166
1167 /* Kill any cached kernel maps before swapout */
1168 vmw_bo_unmap(vmw_buffer_object(bo));
1169 }
1170
1171
1172 /**
1173 * vmw_bo_move_notify - TTM move_notify_callback
1174 *
1175 * @bo: The TTM buffer object about to move.
1176 * @mem: The struct ttm_resource indicating to what memory
1177 * region the move is taking place.
1178 *
1179 * Detaches cached maps and device bindings that require that the
1180 * buffer doesn't move.
1181 */
1182 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1183 struct ttm_resource *mem)
1184 {
1185 struct vmw_buffer_object *vbo;
1186
1187 /* Make sure @bo is embedded in a struct vmw_buffer_object. */
1188 if (bo->destroy != vmw_bo_bo_free &&
1189 bo->destroy != vmw_user_bo_destroy)
1190 return;
1191
1192 vbo = container_of(bo, struct vmw_buffer_object, base);
1193
1194 /*
1195 * Kill any cached kernel maps before move to or from VRAM.
1196 * With other types of moves, the underlying pages stay the same,
1197 * and the map can be kept.
1198 */
1199 if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
1200 vmw_bo_unmap(vbo);
1201
1202 /*
1203 * If we're moving a backup MOB out of MOB placement, then make sure we
1204 * read back all resource content first, and unbind the MOB from
1205 * the resource.
1206 */
1207 if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
1208 vmw_resource_unbind_list(vbo);
1209 }
1210