1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3 *
4 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 #include <drm/ttm/ttm_placement.h>
30
31 #include "vmwgfx_drv.h"
32 #include "ttm_object.h"
33
34
35 /**
36 * struct vmw_user_buffer_object - User-space-visible buffer object
37 *
38 * @prime: The prime object providing user visibility.
39 * @vbo: The struct vmw_buffer_object
40 */
41 struct vmw_user_buffer_object {
42 struct ttm_prime_object prime;
43 struct vmw_buffer_object vbo;
44 };
45
46
47 /**
48 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
49 * vmw_buffer_object.
50 *
51 * @bo: Pointer to the TTM buffer object.
52 * Return: Pointer to the struct vmw_buffer_object embedding the
53 * TTM buffer object.
54 */
55 static struct vmw_buffer_object *
56 vmw_buffer_object(struct ttm_buffer_object *bo)
57 {
58 return container_of(bo, struct vmw_buffer_object, base);
59 }
60
61
62 /**
63 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
64 * vmw_user_buffer_object.
65 *
66 * @bo: Pointer to the TTM buffer object.
67 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM buffer
68 * object.
69 */
70 static struct vmw_user_buffer_object *
71 vmw_user_buffer_object(struct ttm_buffer_object *bo)
72 {
73 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
74
75 return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
76 }
77
78
79 /**
80 * vmw_bo_pin_in_placement - Validate a buffer to placement.
81 *
82 * @dev_priv: Driver private.
83 * @buf: DMA buffer to move.
84 * @placement: The placement to pin it in.
85 * @interruptible: Use interruptible wait.
86 * Return: Zero on success, Negative error code on failure. In particular
87 * -ERESTARTSYS if interrupted by a signal
88 */
89 int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
90 struct vmw_buffer_object *buf,
91 struct ttm_placement *placement,
92 bool interruptible)
93 {
94 struct ttm_operation_ctx ctx = {interruptible, false };
95 struct ttm_buffer_object *bo = &buf->base;
96 int ret;
97 uint32_t new_flags;
98
99 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
100 if (unlikely(ret != 0))
101 return ret;
102
103 vmw_execbuf_release_pinned_bo(dev_priv);
104
105 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
106 if (unlikely(ret != 0))
107 goto err;
108
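/*
 * An already-pinned buffer must not be moved; just check that its
 * current placement is compatible with the requested one.
 */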
109 if (buf->pin_count > 0)
110 ret = ttm_bo_mem_compat(placement, &bo->mem,
111 &new_flags) == true ? 0 : -EINVAL;
112 else
113 ret = ttm_bo_validate(bo, placement, &ctx);
114
115 if (!ret)
116 vmw_bo_pin_reserved(buf, true);
117
118 ttm_bo_unreserve(bo);
119
120 err:
121 ttm_write_unlock(&dev_priv->reservation_sem);
122 return ret;
123 }
124
125
126 /**
127 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
128 *
129 * This function takes the reservation_sem in write mode.
130 * Flushes and unpins the query bo to avoid failures.
131 *
132 * @dev_priv: Driver private.
133 * @buf: DMA buffer to move.
134 * @pin: Pin buffer if true.
135 * @interruptible: Use interruptible wait.
136 * Return: Zero on success, Negative error code on failure. In particular
137 * -ERESTARTSYS if interrupted by a signal
138 */
139 int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
140 struct vmw_buffer_object *buf,
141 bool interruptible)
142 {
143 struct ttm_operation_ctx ctx = {interruptible, false };
144 struct ttm_buffer_object *bo = &buf->base;
145 int ret;
146 uint32_t new_flags;
147
148 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
149 if (unlikely(ret != 0))
150 return ret;
151
152 vmw_execbuf_release_pinned_bo(dev_priv);
153
154 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
155 if (unlikely(ret != 0))
156 goto err;
157
158 if (buf->pin_count > 0) {
159 ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
160 &new_flags) == true ? 0 : -EINVAL;
161 goto out_unreserve;
162 }
163
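/*
 * Try the combined VRAM + GMR placement first; if that fails and the
 * wait was not interrupted, fall back to plain VRAM.
 */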
164 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
165 if (likely(ret == 0) || ret == -ERESTARTSYS)
166 goto out_unreserve;
167
168 ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
169
170 out_unreserve:
171 if (!ret)
172 vmw_bo_pin_reserved(buf, true);
173
174 ttm_bo_unreserve(bo);
175 err:
176 ttm_write_unlock(&dev_priv->reservation_sem);
177 return ret;
178 }
179
180
181 /**
182 * vmw_bo_pin_in_vram - Move a buffer to vram.
183 *
184 * This function takes the reservation_sem in write mode.
185 * Flushes and unpins the query bo to avoid failures.
186 *
187 * @dev_priv: Driver private.
188 * @buf: DMA buffer to move.
189 * @interruptible: Use interruptible wait.
190 * Return: Zero on success, Negative error code on failure. In particular
191 * -ERESTARTSYS if interrupted by a signal
192 */
193 int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
194 struct vmw_buffer_object *buf,
195 bool interruptible)
196 {
197 return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
198 interruptible);
199 }
200
201
202 /**
203 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
204 *
205 * This function takes the reservation_sem in write mode.
206 * Flushes and unpins the query bo to avoid failures.
207 *
208 * @dev_priv: Driver private.
209 * @buf: DMA buffer to pin.
210 * @interruptible: Use interruptible wait.
211 * Return: Zero on success, Negative error code on failure. In particular
212 * -ERESTARTSYS if interrupted by a signal
213 */
214 int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
215 struct vmw_buffer_object *buf,
216 bool interruptible)
217 {
218 struct ttm_operation_ctx ctx = {interruptible, false };
219 struct ttm_buffer_object *bo = &buf->base;
220 struct ttm_placement placement;
221 struct ttm_place place;
222 int ret = 0;
223 uint32_t new_flags;
224
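/*
 * Build a single-entry placement limited to the first num_pages of
 * VRAM, i.e. forcing the buffer to start at offset zero.
 */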
225 place = vmw_vram_placement.placement[0];
226 place.lpfn = bo->num_pages;
227 placement.num_placement = 1;
228 placement.placement = &place;
229 placement.num_busy_placement = 1;
230 placement.busy_placement = &place;
231
232 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
233 if (unlikely(ret != 0))
234 return ret;
235
236 vmw_execbuf_release_pinned_bo(dev_priv);
237 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
238 if (unlikely(ret != 0))
239 goto err_unlock;
240
241 /*
242 * Is this buffer already in vram but not at the start of it?
243 * In that case, evict it first because TTM isn't good at handling
244 * that situation.
245 */
246 if (bo->mem.mem_type == TTM_PL_VRAM &&
247 bo->mem.start < bo->num_pages &&
248 bo->mem.start > 0 &&
249 buf->pin_count == 0) {
250 ctx.interruptible = false;
251 (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
252 }
253
254 if (buf->pin_count > 0)
255 ret = ttm_bo_mem_compat(&placement, &bo->mem,
256 &new_flags) == true ? 0 : -EINVAL;
257 else
258 ret = ttm_bo_validate(bo, &placement, &ctx);
259
260 /* For some reason we didn't end up at the start of vram */
261 WARN_ON(ret == 0 && bo->mem.start != 0);
262 if (!ret)
263 vmw_bo_pin_reserved(buf, true);
264
265 ttm_bo_unreserve(bo);
266 err_unlock:
267 ttm_write_unlock(&dev_priv->reservation_sem);
268
269 return ret;
270 }
271
272
273 /**
274 * vmw_bo_unpin - Unpin the given buffer without moving it.
275 *
276 * This function takes the reservation_sem in read mode.
277 *
278 * @dev_priv: Driver private.
279 * @buf: DMA buffer to unpin.
280 * @interruptible: Use interruptible wait.
281 * Return: Zero on success, Negative error code on failure. In particular
282 * -ERESTARTSYS if interrupted by a signal
283 */
284 int vmw_bo_unpin(struct vmw_private *dev_priv,
285 struct vmw_buffer_object *buf,
286 bool interruptible)
287 {
288 struct ttm_buffer_object *bo = &buf->base;
289 int ret;
290
291 ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
292 if (unlikely(ret != 0))
293 return ret;
294
295 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
296 if (unlikely(ret != 0))
297 goto err;
298
299 vmw_bo_pin_reserved(buf, false);
300
301 ttm_bo_unreserve(bo);
302
303 err:
304 ttm_read_unlock(&dev_priv->reservation_sem);
305 return ret;
306 }
307
308 /**
309 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
310 * of a buffer.
311 *
312 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
313 * @ptr: SVGAGuestPtr returning the result.
314 */
315 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
316 SVGAGuestPtr *ptr)
317 {
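/*
 * VRAM buffers are addressed through the special framebuffer GMR with
 * a byte offset; for other placements the memory start is the GMR id.
 */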
318 if (bo->mem.mem_type == TTM_PL_VRAM) {
319 ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
320 ptr->offset = bo->mem.start << PAGE_SHIFT;
321 } else {
322 ptr->gmrId = bo->mem.start;
323 ptr->offset = 0;
324 }
325 }
326
327
328 /**
329 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
330 *
331 * @vbo: The buffer object. Must be reserved.
332 * @pin: Whether to pin or unpin.
333 *
334 */
335 void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
336 {
337 struct ttm_operation_ctx ctx = { false, true };
338 struct ttm_place pl;
339 struct ttm_placement placement;
340 struct ttm_buffer_object *bo = &vbo->base;
341 uint32_t old_mem_type = bo->mem.mem_type;
342 int ret;
343
344 dma_resv_assert_held(bo->base.resv);
345
346 if (pin) {
347 if (vbo->pin_count++ > 0)
348 return;
349 } else {
350 WARN_ON(vbo->pin_count <= 0);
351 if (--vbo->pin_count > 0)
352 return;
353 }
354
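/*
 * Re-validate in place: keep the current memory type and placement
 * flags, only toggling TTM_PL_FLAG_NO_EVICT to reflect the pin state.
 */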
355 pl.fpfn = 0;
356 pl.lpfn = 0;
357 pl.mem_type = bo->mem.mem_type;
358 pl.flags = bo->mem.placement;
359 if (pin)
360 pl.flags |= TTM_PL_FLAG_NO_EVICT;
361 else
362 pl.flags &= ~TTM_PL_FLAG_NO_EVICT;
363
364 memset(&placement, 0, sizeof(placement));
365 placement.num_placement = 1;
366 placement.placement = &pl;
367
368 ret = ttm_bo_validate(bo, &placement, &ctx);
369
370 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
371 }
372
373
374 /**
375 * vmw_bo_map_and_cache - Map a buffer object and cache the map
376 *
377 * @vbo: The buffer object to map
378 * Return: A kernel virtual address or NULL if mapping failed.
379 *
380 * This function maps a buffer object into the kernel address space, or
381 * returns the virtual kernel address of an already existing map. The virtual
382 * address remains valid as long as the buffer object is pinned or reserved.
383 * The cached map is torn down on any of:
384 * 1) Buffer object move
385 * 2) Buffer object swapout
386 * 3) Buffer object destruction
387 *
388 */
389 void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
390 {
391 struct ttm_buffer_object *bo = &vbo->base;
392 bool not_used;
393 void *virtual;
394 int ret;
395
396 virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
397 if (virtual)
398 return virtual;
399
400 ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
401 if (ret)
402 DRM_ERROR("Buffer object map failed: %d.\n", ret);
403
404 return ttm_kmap_obj_virtual(&vbo->map, &not_used);
405 }
406
407
408 /**
409 * vmw_bo_unmap - Tear down a cached buffer object map.
410 *
411 * @vbo: The buffer object whose map we are tearing down.
412 *
413 * This function tears down a cached map set up using
414 * vmw_bo_map_and_cache().
415 */
416 void vmw_bo_unmap(struct vmw_buffer_object *vbo)
417 {
418 if (vbo->map.bo == NULL)
419 return;
420
421 ttm_bo_kunmap(&vbo->map);
422 }
423
424
425 /**
426 * vmw_bo_acc_size - Calculate the kernel memory accounted for a buffer object
427 *
428 * @dev_priv: Pointer to a struct vmw_private identifying the device.
429 * @size: The requested buffer size.
430 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
431 */
432 static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
433 bool user)
434 {
435 static size_t struct_size, user_struct_size;
436 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
437 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
438
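/* Compute the fixed per-object overheads only once and cache them. */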
439 if (unlikely(struct_size == 0)) {
440 size_t backend_size = ttm_round_pot(vmw_tt_size);
441
442 struct_size = backend_size +
443 ttm_round_pot(sizeof(struct vmw_buffer_object));
444 user_struct_size = backend_size +
445 ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
446 TTM_OBJ_EXTRA_SIZE;
447 }
448
449 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
450 page_array_size +=
451 ttm_round_pot(num_pages * sizeof(dma_addr_t));
452
453 return ((user) ? user_struct_size : struct_size) +
454 page_array_size;
455 }
456
457
458 /**
459 * vmw_bo_bo_free - vmw buffer object destructor
460 *
461 * @bo: Pointer to the embedded struct ttm_buffer_object
462 */
463 void vmw_bo_bo_free(struct ttm_buffer_object *bo)
464 {
465 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
466
467 WARN_ON(vmw_bo->dirty);
468 WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
469 vmw_bo_unmap(vmw_bo);
470 kfree(vmw_bo);
471 }
472
473
474 /**
475 * vmw_user_bo_destroy - vmw user buffer object destructor
476 *
477 * @bo: Pointer to the embedded struct ttm_buffer_object
478 */
479 static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
480 {
481 struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
482 struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
483
484 WARN_ON(vbo->dirty);
485 WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
486 vmw_bo_unmap(vbo);
487 ttm_prime_object_kfree(vmw_user_bo, prime);
488 }
489
490
491 /**
492 * vmw_bo_init - Initialize a vmw buffer object
493 *
494 * @dev_priv: Pointer to the device private struct
495 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
496 * @size: Buffer object size in bytes.
497 * @placement: Initial placement.
498 * @interruptible: Whether waits should be performed interruptible.
499 * @bo_free: The buffer object destructor.
500 * Return: Zero on success, negative error code on error.
501 *
502 * Note that on error, the code will free the buffer object.
503 */
504 int vmw_bo_init(struct vmw_private *dev_priv,
505 struct vmw_buffer_object *vmw_bo,
506 size_t size, struct ttm_placement *placement,
507 bool interruptible,
508 void (*bo_free)(struct ttm_buffer_object *bo))
509 {
510 struct ttm_bo_device *bdev = &dev_priv->bdev;
511 size_t acc_size;
512 int ret;
513 bool user = (bo_free == &vmw_user_bo_destroy);
514
515 WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
516
517 acc_size = vmw_bo_acc_size(dev_priv, size, user);
518 memset(vmw_bo, 0, sizeof(*vmw_bo));
519 BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
520 vmw_bo->base.priority = 3;
521 vmw_bo->res_tree = RB_ROOT;
522
523 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
524 ttm_bo_type_device, placement,
525 0, interruptible, acc_size,
526 NULL, NULL, bo_free);
527 return ret;
528 }
529
530
531 /**
532 * vmw_user_bo_release - TTM reference base object release callback for
533 * vmw user buffer objects
534 *
535 * @p_base: The TTM base object pointer about to be unreferenced.
536 *
537 * Clears the TTM base object pointer and drops the reference the
538 * base object has on the underlying struct vmw_buffer_object.
539 */
540 static void vmw_user_bo_release(struct ttm_base_object **p_base)
541 {
542 struct vmw_user_buffer_object *vmw_user_bo;
543 struct ttm_base_object *base = *p_base;
544
545 *p_base = NULL;
546
547 if (unlikely(base == NULL))
548 return;
549
550 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
551 prime.base);
552 ttm_bo_put(&vmw_user_bo->vbo.base);
553 }
554
555
556 /**
557 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
558 * for vmw user buffer objects
559 *
560 * @base: Pointer to the TTM base object
561 * @ref_type: Reference type of the reference reaching zero.
562 *
563 * Called when user-space drops its last synccpu reference on the buffer
564 * object, either explicitly or as part of a cleanup file close.
565 */
566 static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
567 enum ttm_ref_type ref_type)
568 {
569 struct vmw_user_buffer_object *user_bo;
570
571 user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
572
573 switch (ref_type) {
574 case TTM_REF_SYNCCPU_WRITE:
575 atomic_dec(&user_bo->vbo.cpu_writers);
576 break;
577 default:
578 WARN_ONCE(true, "Undefined buffer object reference release.\n");
579 }
580 }
581
582
583 /**
584 * vmw_user_bo_alloc - Allocate a user buffer object
585 *
586 * @dev_priv: Pointer to a struct device private.
587 * @tfile: Pointer to a struct ttm_object_file on which to register the user
588 * object.
589 * @size: Size of the buffer object.
590 * @shareable: Boolean whether the buffer is shareable with other open files.
591 * @handle: Pointer to where the handle value should be assigned.
592 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
593 * should be assigned.
 * @p_base: Pointer to where a refcounted pointer to the TTM base object should
 * be assigned, or NULL if no such pointer is required.
594 * Return: Zero on success, negative error code on error.
595 */
596 int vmw_user_bo_alloc(struct vmw_private *dev_priv,
597 struct ttm_object_file *tfile,
598 uint32_t size,
599 bool shareable,
600 uint32_t *handle,
601 struct vmw_buffer_object **p_vbo,
602 struct ttm_base_object **p_base)
603 {
604 struct vmw_user_buffer_object *user_bo;
605 int ret;
606
607 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
608 if (unlikely(!user_bo)) {
609 DRM_ERROR("Failed to allocate a buffer.\n");
610 return -ENOMEM;
611 }
612
613 ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
614 (dev_priv->has_mob) ?
615 &vmw_sys_placement :
616 &vmw_vram_sys_placement, true,
617 &vmw_user_bo_destroy);
618 if (unlikely(ret != 0))
619 return ret;
620
621 ttm_bo_get(&user_bo->vbo.base);
622 ret = ttm_prime_object_init(tfile,
623 size,
624 &user_bo->prime,
625 shareable,
626 ttm_buffer_type,
627 &vmw_user_bo_release,
628 &vmw_user_bo_ref_obj_release);
629 if (unlikely(ret != 0)) {
630 ttm_bo_put(&user_bo->vbo.base);
631 goto out_no_base_object;
632 }
633
634 *p_vbo = &user_bo->vbo;
635 if (p_base) {
636 *p_base = &user_bo->prime.base;
637 kref_get(&(*p_base)->refcount);
638 }
639 *handle = user_bo->prime.base.handle;
640
641 out_no_base_object:
642 return ret;
643 }
644
645
646 /**
647 * vmw_user_bo_verify_access - verify access permissions on this
648 * buffer object.
649 *
650 * @bo: Pointer to the buffer object being accessed
651 * @tfile: Identifying the caller.
652 */
653 int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
654 struct ttm_object_file *tfile)
655 {
656 struct vmw_user_buffer_object *vmw_user_bo;
657
658 if (unlikely(bo->destroy != vmw_user_bo_destroy))
659 return -EPERM;
660
661 vmw_user_bo = vmw_user_buffer_object(bo);
662
663 /* Check that the caller has opened the object. */
664 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
665 return 0;
666
667 DRM_ERROR("Could not grant buffer access.\n");
668 return -EPERM;
669 }
670
671
672 /**
673 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
674 * access, idling previous GPU operations on the buffer and optionally
675 * blocking it for further command submissions.
676 *
677 * @user_bo: Pointer to the buffer object being grabbed for CPU access
678 * @tfile: Identifying the caller.
679 * @flags: Flags indicating how the grab should be performed.
680 * Return: Zero on success, Negative error code on error. In particular,
681 * -EBUSY will be returned if a dontblock operation is requested and the
682 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
683 * interrupted by a signal.
684 *
685 * A blocking grab will be automatically released when @tfile is closed.
686 */
687 static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
688 struct ttm_object_file *tfile,
689 uint32_t flags)
690 {
691 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
692 struct ttm_buffer_object *bo = &user_bo->vbo.base;
693 bool existed;
694 int ret;
695
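/*
 * drm_vmw_synccpu_allow_cs only waits for the buffer to become idle;
 * it does not bump cpu_writers, so command submission stays allowed.
 */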
696 if (flags & drm_vmw_synccpu_allow_cs) {
697 long lret;
698
699 lret = dma_resv_wait_timeout_rcu
700 (bo->base.resv, true, true,
701 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
702 if (!lret)
703 return -EBUSY;
704 else if (lret < 0)
705 return lret;
706 return 0;
707 }
708
709 ret = ttm_bo_reserve(bo, true, nonblock, NULL);
710 if (unlikely(ret != 0))
711 return ret;
712
713 ret = ttm_bo_wait(bo, true, nonblock);
714 if (likely(ret == 0))
715 atomic_inc(&user_bo->vbo.cpu_writers);
716
717 ttm_bo_unreserve(bo);
718 if (unlikely(ret != 0))
719 return ret;
720
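/*
 * Record the grab as a TTM_REF_SYNCCPU_WRITE reference so that it is
 * released automatically when the object file is closed. If the
 * reference already existed, undo the extra cpu_writers increment.
 */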
721 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
722 TTM_REF_SYNCCPU_WRITE, &existed, false);
723 if (ret != 0 || existed)
724 atomic_dec(&user_bo->vbo.cpu_writers);
725
726 return ret;
727 }
728
729 /**
730 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
731 * and unblock command submission on the buffer if blocked.
732 *
733 * @handle: Handle identifying the buffer object.
734 * @tfile: Identifying the caller.
735 * @flags: Flags indicating the type of release.
736 */
737 static int vmw_user_bo_synccpu_release(uint32_t handle,
738 struct ttm_object_file *tfile,
739 uint32_t flags)
740 {
741 if (!(flags & drm_vmw_synccpu_allow_cs))
742 return ttm_ref_object_base_unref(tfile, handle,
743 TTM_REF_SYNCCPU_WRITE);
744
745 return 0;
746 }
747
748
749 /**
750 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
751 * functionality.
752 *
753 * @dev: Identifies the drm device.
754 * @data: Pointer to the ioctl argument.
755 * @file_priv: Identifies the caller.
756 * Return: Zero on success, negative error code on error.
757 *
758 * This function checks the ioctl arguments for validity and calls the
759 * relevant synccpu functions.
760 */
761 int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
762 struct drm_file *file_priv)
763 {
764 struct drm_vmw_synccpu_arg *arg =
765 (struct drm_vmw_synccpu_arg *) data;
766 struct vmw_buffer_object *vbo;
767 struct vmw_user_buffer_object *user_bo;
768 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
769 struct ttm_base_object *buffer_base;
770 int ret;
771
772 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
773 || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
774 drm_vmw_synccpu_dontblock |
775 drm_vmw_synccpu_allow_cs)) != 0) {
776 DRM_ERROR("Illegal synccpu flags.\n");
777 return -EINVAL;
778 }
779
780 switch (arg->op) {
781 case drm_vmw_synccpu_grab:
782 ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
783 &buffer_base);
784 if (unlikely(ret != 0))
785 return ret;
786
787 user_bo = container_of(vbo, struct vmw_user_buffer_object,
788 vbo);
789 ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
790 vmw_bo_unreference(&vbo);
791 ttm_base_object_unref(&buffer_base);
792 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
793 ret != -EBUSY)) {
794 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
795 (unsigned int) arg->handle);
796 return ret;
797 }
798 break;
799 case drm_vmw_synccpu_release:
800 ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
801 arg->flags);
802 if (unlikely(ret != 0)) {
803 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
804 (unsigned int) arg->handle);
805 return ret;
806 }
807 break;
808 default:
809 DRM_ERROR("Invalid synccpu operation.\n");
810 return -EINVAL;
811 }
812
813 return 0;
814 }
815
816
817 /**
818 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
819 * allocation functionality.
820 *
821 * @dev: Identifies the drm device.
822 * @data: Pointer to the ioctl argument.
823 * @file_priv: Identifies the caller.
824 * Return: Zero on success, negative error code on error.
825 *
826 * This function checks the ioctl arguments for validity and allocates a
827 * struct vmw_user_buffer_object bo.
828 */
829 int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
830 struct drm_file *file_priv)
831 {
832 struct vmw_private *dev_priv = vmw_priv(dev);
833 union drm_vmw_alloc_dmabuf_arg *arg =
834 (union drm_vmw_alloc_dmabuf_arg *)data;
835 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
836 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
837 struct vmw_buffer_object *vbo;
838 uint32_t handle;
839 int ret;
840
841 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
842 if (unlikely(ret != 0))
843 return ret;
844
845 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
846 req->size, false, &handle, &vbo,
847 NULL);
848 if (unlikely(ret != 0))
849 goto out_no_bo;
850
851 rep->handle = handle;
852 rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
853 rep->cur_gmr_id = handle;
854 rep->cur_gmr_offset = 0;
855
856 vmw_bo_unreference(&vbo);
857
858 out_no_bo:
859 ttm_read_unlock(&dev_priv->reservation_sem);
860
861 return ret;
862 }
863
864
865 /**
866 * vmw_bo_unref_ioctl - Generic handle close ioctl.
867 *
868 * @dev: Identifies the drm device.
869 * @data: Pointer to the ioctl argument.
870 * @file_priv: Identifies the caller.
871 * Return: Zero on success, negative error code on error.
872 *
873 * This function checks the ioctl arguments for validity and closes a
874 * handle to a TTM base object, optionally freeing the object.
875 */
876 int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
877 struct drm_file *file_priv)
878 {
879 struct drm_vmw_unref_dmabuf_arg *arg =
880 (struct drm_vmw_unref_dmabuf_arg *)data;
881
882 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
883 arg->handle,
884 TTM_REF_USAGE);
885 }
886
887
888 /**
889 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
890 *
891 * @tfile: The TTM object file the handle is registered with.
892 * @handle: The user buffer object handle
893 * @out: Pointer to where a pointer to the embedded
894 * struct vmw_buffer_object should be placed.
895 * @p_base: Pointer to where a pointer to the TTM base object should be
896 * placed, or NULL if no such pointer is required.
897 * Return: Zero on success, Negative error code on error.
898 *
899 * Both the output base object pointer and the vmw buffer object pointer
900 * will be refcounted.
901 */
902 int vmw_user_bo_lookup(struct ttm_object_file *tfile,
903 uint32_t handle, struct vmw_buffer_object **out,
904 struct ttm_base_object **p_base)
905 {
906 struct vmw_user_buffer_object *vmw_user_bo;
907 struct ttm_base_object *base;
908
909 base = ttm_base_object_lookup(tfile, handle);
910 if (unlikely(base == NULL)) {
911 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
912 (unsigned long)handle);
913 return -ESRCH;
914 }
915
916 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
917 ttm_base_object_unref(&base);
918 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
919 (unsigned long)handle);
920 return -EINVAL;
921 }
922
923 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
924 prime.base);
925 ttm_bo_get(&vmw_user_bo->vbo.base);
926 if (p_base)
927 *p_base = base;
928 else
929 ttm_base_object_unref(&base);
930 *out = &vmw_user_bo->vbo;
931
932 return 0;
933 }
934
935 /**
936 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
937 * @tfile: The TTM object file the handle is registered with.
938 * @handle: The user buffer object handle.
939 *
940 * This function looks up a user buffer object and returns a pointer to the
941 * struct vmw_buffer_object it derives from without refcounting the pointer.
942 * The returned pointer is only valid until vmw_user_bo_noref_release() is
943 * called, and the object pointed to by the returned pointer may be doomed.
944 * Any persistent usage of the object requires a refcount to be taken using
945 * ttm_bo_reference_unless_doomed(). If this function returns successfully, it
946 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
947 * scheduling functions may be called in between these function calls.
948 *
949 * Return: A struct vmw_buffer_object pointer if successful or negative
950 * error pointer on failure.
951 */
952 struct vmw_buffer_object *
953 vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
954 {
955 struct vmw_user_buffer_object *vmw_user_bo;
956 struct ttm_base_object *base;
957
958 base = ttm_base_object_noref_lookup(tfile, handle);
959 if (!base) {
960 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
961 (unsigned long)handle);
962 return ERR_PTR(-ESRCH);
963 }
964
965 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
966 ttm_base_object_noref_release();
967 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
968 (unsigned long)handle);
969 return ERR_PTR(-EINVAL);
970 }
971
972 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
973 prime.base);
974 return &vmw_user_bo->vbo;
975 }
976
977 /**
978 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
979 *
980 * @tfile: The TTM object file to register the handle with.
981 * @vbo: The embedded vmw buffer object.
982 * @handle: Pointer to where the new handle should be placed.
983 * Return: Zero on success, Negative error code on error.
984 */
985 int vmw_user_bo_reference(struct ttm_object_file *tfile,
986 struct vmw_buffer_object *vbo,
987 uint32_t *handle)
988 {
989 struct vmw_user_buffer_object *user_bo;
990
991 if (vbo->base.destroy != vmw_user_bo_destroy)
992 return -EINVAL;
993
994 user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
995
996 *handle = user_bo->prime.base.handle;
997 return ttm_ref_object_add(tfile, &user_bo->prime.base,
998 TTM_REF_USAGE, NULL, false);
999 }
1000
1001
1002 /**
1003 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
1004 * object without unreserving it.
1005 *
1006 * @bo: Pointer to the struct ttm_buffer_object to fence.
1007 * @fence: Pointer to the fence. If NULL, this function will
1008 * insert a fence into the command stream.
1009 *
1010 * Contrary to the ttm_eu version of this function, it takes only
1011 * a single buffer object instead of a list, and it also doesn't
1012 * unreserve the buffer object, which needs to be done separately.
1013 */
1014 void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1015 struct vmw_fence_obj *fence)
1016 {
1017 struct ttm_bo_device *bdev = bo->bdev;
1018
1019 struct vmw_private *dev_priv =
1020 container_of(bdev, struct vmw_private, bdev);
1021
1022 if (fence == NULL) {
1023 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1024 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1025 dma_fence_put(&fence->base);
1026 } else
1027 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1028 }
1029
1030
1031 /**
1032 * vmw_dumb_create - Create a dumb kms buffer
1033 *
1034 * @file_priv: Pointer to a struct drm_file identifying the caller.
1035 * @dev: Pointer to the drm device.
1036 * @args: Pointer to a struct drm_mode_create_dumb structure
1037 * Return: Zero on success, negative error code on failure.
1038 *
1039 * This is a driver callback for the core drm create_dumb functionality.
1040 * Note that this is very similar to the vmw_bo_alloc ioctl, except
1041 * that the arguments have a different format.
1042 */
1043 int vmw_dumb_create(struct drm_file *file_priv,
1044 struct drm_device *dev,
1045 struct drm_mode_create_dumb *args)
1046 {
1047 struct vmw_private *dev_priv = vmw_priv(dev);
1048 struct vmw_buffer_object *vbo;
1049 int ret;
1050
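/* Tightly packed scanlines: bytes per pixel, rounded up, times width. */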
1051 args->pitch = args->width * ((args->bpp + 7) / 8);
1052 args->size = args->pitch * args->height;
1053
1054 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1055 if (unlikely(ret != 0))
1056 return ret;
1057
1058 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1059 args->size, false, &args->handle,
1060 &vbo, NULL);
1061 if (unlikely(ret != 0))
1062 goto out_no_bo;
1063
1064 vmw_bo_unreference(&vbo);
1065 out_no_bo:
1066 ttm_read_unlock(&dev_priv->reservation_sem);
1067 return ret;
1068 }
1069
1070
1071 /**
1072 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1073 *
1074 * @file_priv: Pointer to a struct drm_file identifying the caller.
1075 * @dev: Pointer to the drm device.
1076 * @handle: Handle identifying the dumb buffer.
1077 * @offset: The address space offset returned.
1078 * Return: Zero on success, negative error code on failure.
1079 *
1080 * This is a driver callback for the core drm dumb_map_offset functionality.
1081 */
1082 int vmw_dumb_map_offset(struct drm_file *file_priv,
1083 struct drm_device *dev, uint32_t handle,
1084 uint64_t *offset)
1085 {
1086 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1087 struct vmw_buffer_object *out_buf;
1088 int ret;
1089
1090 ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1091 if (ret != 0)
1092 return -EINVAL;
1093
1094 *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
1095 vmw_bo_unreference(&out_buf);
1096 return 0;
1097 }
1098
1099
1100 /**
1101 * vmw_dumb_destroy - Destroy a dumb buffer
1102 *
1103 * @file_priv: Pointer to a struct drm_file identifying the caller.
1104 * @dev: Pointer to the drm device.
1105 * @handle: Handle identifying the dumb buffer.
1106 * Return: Zero on success, negative error code on failure.
1107 *
1108 * This is a driver callback for the core drm dumb_destroy functionality.
1109 */
1110 int vmw_dumb_destroy(struct drm_file *file_priv,
1111 struct drm_device *dev,
1112 uint32_t handle)
1113 {
1114 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1115 handle, TTM_REF_USAGE);
1116 }
1117
1118
1119 /**
1120 * vmw_bo_swap_notify - swapout notify callback.
1121 *
1122 * @bo: The buffer object to be swapped out.
1123 */
1124 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1125 {
1126 /* Is @bo embedded in a struct vmw_buffer_object? */
1127 if (bo->destroy != vmw_bo_bo_free &&
1128 bo->destroy != vmw_user_bo_destroy)
1129 return;
1130
1131 /* Kill any cached kernel maps before swapout */
1132 vmw_bo_unmap(vmw_buffer_object(bo));
1133 }
1134
1135
1136 /**
1137 * vmw_bo_move_notify - TTM move_notify_callback
1138 *
1139 * @bo: The TTM buffer object about to move.
1140 * @mem: The struct ttm_resource indicating to what memory
1141 * region the move is taking place.
1142 *
1143 * Detaches cached maps and device bindings that require that the
1144 * buffer doesn't move.
1145 */
1146 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1147 struct ttm_resource *mem)
1148 {
1149 struct vmw_buffer_object *vbo;
1150
1151 if (mem == NULL)
1152 return;
1153
1154 /* Make sure @bo is embedded in a struct vmw_buffer_object. */
1155 if (bo->destroy != vmw_bo_bo_free &&
1156 bo->destroy != vmw_user_bo_destroy)
1157 return;
1158
1159 vbo = container_of(bo, struct vmw_buffer_object, base);
1160
1161 /*
1162 * Kill any cached kernel maps before move to or from VRAM.
1163 * With other types of moves, the underlying pages stay the same,
1164 * and the map can be kept.
1165 */
1166 if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
1167 vmw_bo_unmap(vbo);
1168
1169 /*
1170 * If we're moving a backup MOB out of MOB placement, then make sure we
1171 * read back all resource content first, and unbind the MOB from
1172 * the resource.
1173 */
1174 if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
1175 vmw_resource_unbind_list(vbo);
1176 }
1177