1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include <linux/sched/signal.h>
29 
30 #include "vmwgfx_drv.h"
31 
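/*
 * Fence sequence numbers are 32-bit values that may wrap around. A fence
 * with seqno F is considered passed by a device seqno S when the unsigned
 * difference (S - F) is less than VMW_FENCE_WRAP, i.e. F lies no more than
 * 2^31 steps behind S on the circular 32-bit number line.
 */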
32 #define VMW_FENCE_WRAP (1 << 31)
33 
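/*
 * Per-device fence bookkeeping: the list of outstanding fence objects, the
 * deferred cleanup work, and the state of the fence goal interrupt.
 */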
34 struct vmw_fence_manager {
35 	int num_fence_objects;
36 	struct vmw_private *dev_priv;
37 	spinlock_t lock;
38 	struct list_head fence_list;
39 	struct work_struct work;
40 	u32 user_fence_size;
41 	u32 fence_size;
42 	u32 event_fence_action_size;
43 	bool fifo_down;
44 	struct list_head cleanup_list;
45 	uint32_t pending_actions[VMW_ACTION_MAX];
46 	struct mutex goal_irq_mutex;
47 	bool goal_irq_on; /* Protected by @goal_irq_mutex */
48 	bool seqno_valid; /* Protected by @lock, and may not be set to true
49 			     without the @goal_irq_mutex held. */
50 	u64 ctx;
51 };
52 
53 struct vmw_user_fence {
54 	struct ttm_base_object base;
55 	struct vmw_fence_obj fence;
56 };
57 
58 /**
59  * struct vmw_event_fence_action - fence action that delivers a drm event.
60  *
61  * @action: A struct vmw_fence_action to hook up to a fence.
62  * @event: A pointer to the pending event.
63  * @fence: A referenced pointer to the fence to keep it alive while @action
64  * hangs on it.
65  * @dev: Pointer to a struct drm_device so we can access the event stuff.
66  * @tv_sec: If non-null, the variable pointed to will be assigned the
67  * tv_sec value of the current time when the fence signals.
68  * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will
69  * be assigned the tv_usec value of the current time when the fence signals.
70  */
71 struct vmw_event_fence_action {
72 	struct vmw_fence_action action;
73 
74 	struct drm_pending_event *event;
75 	struct vmw_fence_obj *fence;
76 	struct drm_device *dev;
77 
78 	uint32_t *tv_sec;
79 	uint32_t *tv_usec;
80 };
81 
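/*
 * The embedded dma_fence uses the owning manager's spinlock (set up in
 * vmw_fence_obj_init()), so the manager can be recovered from any fence
 * with a single container_of() on that lock pointer.
 */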
82 static struct vmw_fence_manager *
83 fman_from_fence(struct vmw_fence_obj *fence)
84 {
85 	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
86 }
87 
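/*
 * When the device advertises SVGA_CAP2_EXTRA_REGS the fence goal lives in
 * the SVGA_REG_FENCE_GOAL register; otherwise it is kept in FIFO memory
 * (SVGA_FIFO_FENCE_GOAL). These helpers hide that difference.
 */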
88 static u32 vmw_fence_goal_read(struct vmw_private *vmw)
89 {
90 	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
91 		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
92 	else
93 		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
94 }
95 
96 static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
97 {
98 	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
99 		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
100 	else
101 		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
102 }
103 
104 /*
105  * Note on fencing subsystem usage of irqs:
106  * Typically the vmw_fences_update function is called
107  *
108  * a) When a new fence seqno has been submitted by the fifo code.
109  * b) On-demand when we have waiters. Sleeping waiters will switch on the
110  * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
111  * irq is received. When the last fence waiter is gone, that IRQ is masked
112  * away.
113  *
114  * In situations where there are no waiters and we don't submit any new fences,
115  * fence objects may not be signaled. This is perfectly OK, since there are
116  * no consumers of the signaled data, but that is NOT ok when there are fence
117  * actions attached to a fence. The fencing subsystem then makes use of the
118  * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
119  * which has an action attached, and each time vmw_fences_update is called,
120  * the subsystem makes sure the fence goal seqno is updated.
121  *
122  * The fence goal seqno irq is on as long as there are unsignaled fence
123  * objects with actions attached to them.
124  */
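/*
 * A minimal sketch (not part of this file) of the waiter path described
 * above, assuming @fman and a @seqno already submitted by the command
 * stream; error handling is omitted:
 *
 *	struct vmw_fence_obj *fence;
 *
 *	if (vmw_fence_create(fman, seqno, &fence) == 0) {
 *		(void) vmw_fence_obj_wait(fence, false, true,
 *					  VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */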
125 
126 static void vmw_fence_obj_destroy(struct dma_fence *f)
127 {
128 	struct vmw_fence_obj *fence =
129 		container_of(f, struct vmw_fence_obj, base);
130 
131 	struct vmw_fence_manager *fman = fman_from_fence(fence);
132 
133 	spin_lock(&fman->lock);
134 	list_del_init(&fence->head);
135 	--fman->num_fence_objects;
136 	spin_unlock(&fman->lock);
137 	fence->destroy(fence);
138 }
139 
140 static const char *vmw_fence_get_driver_name(struct dma_fence *f)
141 {
142 	return "vmwgfx";
143 }
144 
145 static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
146 {
147 	return "svga";
148 }
149 
150 static bool vmw_fence_enable_signaling(struct dma_fence *f)
151 {
152 	struct vmw_fence_obj *fence =
153 		container_of(f, struct vmw_fence_obj, base);
154 
155 	struct vmw_fence_manager *fman = fman_from_fence(fence);
156 	struct vmw_private *dev_priv = fman->dev_priv;
157 
158 	u32 seqno = vmw_fence_read(dev_priv);
159 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
160 		return false;
161 
162 	return true;
163 }
164 
165 struct vmwgfx_wait_cb {
166 	struct dma_fence_cb base;
167 	struct task_struct *task;
168 };
169 
170 static void
171 vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
172 {
173 	struct vmwgfx_wait_cb *wait =
174 		container_of(cb, struct vmwgfx_wait_cb, base);
175 
176 	wake_up_process(wait->task);
177 }
178 
179 static void __vmw_fences_update(struct vmw_fence_manager *fman);
180 
181 static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
182 {
183 	struct vmw_fence_obj *fence =
184 		container_of(f, struct vmw_fence_obj, base);
185 
186 	struct vmw_fence_manager *fman = fman_from_fence(fence);
187 	struct vmw_private *dev_priv = fman->dev_priv;
188 	struct vmwgfx_wait_cb cb;
189 	long ret = timeout;
190 
191 	if (likely(vmw_fence_obj_signaled(fence)))
192 		return timeout;
193 
194 	vmw_seqno_waiter_add(dev_priv);
195 
196 	spin_lock(f->lock);
197 
198 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
199 		goto out;
200 
201 	if (intr && signal_pending(current)) {
202 		ret = -ERESTARTSYS;
203 		goto out;
204 	}
205 
206 	cb.base.func = vmwgfx_wait_cb;
207 	cb.task = current;
208 	list_add(&cb.base.node, &f->cb_list);
209 
210 	for (;;) {
211 		__vmw_fences_update(fman);
212 
213 		/*
214 		 * We can use the barrier free __set_current_state() since
215 		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
216 		 * fence spinlock.
217 		 */
218 		if (intr)
219 			__set_current_state(TASK_INTERRUPTIBLE);
220 		else
221 			__set_current_state(TASK_UNINTERRUPTIBLE);
222 
223 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
224 			if (ret == 0 && timeout > 0)
225 				ret = 1;
226 			break;
227 		}
228 
229 		if (intr && signal_pending(current)) {
230 			ret = -ERESTARTSYS;
231 			break;
232 		}
233 
234 		if (ret == 0)
235 			break;
236 
237 		spin_unlock(f->lock);
238 
239 		ret = schedule_timeout(ret);
240 
241 		spin_lock(f->lock);
242 	}
243 	__set_current_state(TASK_RUNNING);
244 	if (!list_empty(&cb.base.node))
245 		list_del(&cb.base.node);
246 
247 out:
248 	spin_unlock(f->lock);
249 
250 	vmw_seqno_waiter_remove(dev_priv);
251 
252 	return ret;
253 }
254 
255 static const struct dma_fence_ops vmw_fence_ops = {
256 	.get_driver_name = vmw_fence_get_driver_name,
257 	.get_timeline_name = vmw_fence_get_timeline_name,
258 	.enable_signaling = vmw_fence_enable_signaling,
259 	.wait = vmw_fence_wait,
260 	.release = vmw_fence_obj_destroy,
261 };
262 
263 
264 /*
265  * Execute signal actions on fences recently signaled.
266  * This is done from a workqueue so we don't have to execute
267  * signal actions from atomic context.
268  */
269 
270 static void vmw_fence_work_func(struct work_struct *work)
271 {
272 	struct vmw_fence_manager *fman =
273 		container_of(work, struct vmw_fence_manager, work);
274 	struct list_head list;
275 	struct vmw_fence_action *action, *next_action;
276 	bool seqno_valid;
277 
278 	do {
279 		INIT_LIST_HEAD(&list);
280 		mutex_lock(&fman->goal_irq_mutex);
281 
282 		spin_lock(&fman->lock);
283 		list_splice_init(&fman->cleanup_list, &list);
284 		seqno_valid = fman->seqno_valid;
285 		spin_unlock(&fman->lock);
286 
287 		if (!seqno_valid && fman->goal_irq_on) {
288 			fman->goal_irq_on = false;
289 			vmw_goal_waiter_remove(fman->dev_priv);
290 		}
291 		mutex_unlock(&fman->goal_irq_mutex);
292 
293 		if (list_empty(&list))
294 			return;
295 
296 		/*
297 		 * At this point, only we should be able to manipulate the
298 		 * list heads of the actions we have on the private list,
299 		 * hence fman::lock is not held.
300 		 */
301 
302 		list_for_each_entry_safe(action, next_action, &list, head) {
303 			list_del_init(&action->head);
304 			if (action->cleanup)
305 				action->cleanup(action);
306 		}
307 	} while (1);
308 }
309 
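/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure the manager serves.
 *
 * Returns the new fence manager, or NULL if allocation fails. The manager
 * is created with fifo_down set, so new fence objects are refused with
 * -EBUSY until vmw_fence_fifo_up() is called. It is freed with
 * vmw_fence_manager_takedown().
 */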
310 struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
311 {
312 	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
313 
314 	if (unlikely(!fman))
315 		return NULL;
316 
317 	fman->dev_priv = dev_priv;
318 	spin_lock_init(&fman->lock);
319 	INIT_LIST_HEAD(&fman->fence_list);
320 	INIT_LIST_HEAD(&fman->cleanup_list);
321 	INIT_WORK(&fman->work, &vmw_fence_work_func);
322 	fman->fifo_down = true;
323 	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
324 		TTM_OBJ_EXTRA_SIZE;
325 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
326 	fman->event_fence_action_size =
327 		ttm_round_pot(sizeof(struct vmw_event_fence_action));
328 	mutex_init(&fman->goal_irq_mutex);
329 	fman->ctx = dma_fence_context_alloc(1);
330 
331 	return fman;
332 }
333 
334 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
335 {
336 	bool lists_empty;
337 
338 	(void) cancel_work_sync(&fman->work);
339 
340 	spin_lock(&fman->lock);
341 	lists_empty = list_empty(&fman->fence_list) &&
342 		list_empty(&fman->cleanup_list);
343 	spin_unlock(&fman->lock);
344 
345 	BUG_ON(!lists_empty);
346 	kfree(fman);
347 }
348 
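/**
 * vmw_fence_obj_init - Initialize an embedded fence object.
 *
 * @fman: Pointer to the fence manager that will track the fence.
 * @fence: The fence object to initialize.
 * @seqno: The sequence number the fence will signal on.
 * @destroy: Destructor called when the last reference is dropped.
 *
 * Initializes the embedded dma_fence on the manager's context, hooks the
 * fence into the manager's fence list and returns 0, or returns -EBUSY if
 * the fifo is down.
 */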
349 static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
350 			      struct vmw_fence_obj *fence, u32 seqno,
351 			      void (*destroy) (struct vmw_fence_obj *fence))
352 {
353 	int ret = 0;
354 
355 	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
356 		       fman->ctx, seqno);
357 	INIT_LIST_HEAD(&fence->seq_passed_actions);
358 	fence->destroy = destroy;
359 
360 	spin_lock(&fman->lock);
361 	if (unlikely(fman->fifo_down)) {
362 		ret = -EBUSY;
363 		goto out_unlock;
364 	}
365 	list_add_tail(&fence->head, &fman->fence_list);
366 	++fman->num_fence_objects;
367 
368 out_unlock:
369 	spin_unlock(&fman->lock);
370 	return ret;
371 
372 }
373 
374 static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
375 				struct list_head *list)
376 {
377 	struct vmw_fence_action *action, *next_action;
378 
379 	list_for_each_entry_safe(action, next_action, list, head) {
380 		list_del_init(&action->head);
381 		fman->pending_actions[action->type]--;
382 		if (action->seq_passed != NULL)
383 			action->seq_passed(action);
384 
385 		/*
386 		 * Add the cleanup action to the cleanup list so that
387 		 * it will be performed by a worker task.
388 		 */
389 
390 		list_add_tail(&action->head, &fman->cleanup_list);
391 	}
392 }
393 
394 /**
395  * vmw_fence_goal_new_locked - Figure out a new device fence goal
396  * seqno if needed.
397  *
398  * @fman: Pointer to a fence manager.
399  * @passed_seqno: The seqno the device currently signals as passed.
400  *
401  * This function should be called with the fence manager lock held.
402  * It is typically called when we have a new passed_seqno, and
403  * we might need to update the fence goal. It checks to see whether
404  * the current fence goal has already passed, and, in that case,
405  * scans through all unsignaled fences to get the next fence object with an
406  * action attached, and sets the seqno of that fence as a new fence goal.
407  *
408  * Returns true if the device goal seqno was updated, false otherwise.
409  */
410 static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
411 				      u32 passed_seqno)
412 {
413 	u32 goal_seqno;
414 	struct vmw_fence_obj *fence;
415 
416 	if (likely(!fman->seqno_valid))
417 		return false;
418 
419 	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
420 	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
421 		return false;
422 
423 	fman->seqno_valid = false;
424 	list_for_each_entry(fence, &fman->fence_list, head) {
425 		if (!list_empty(&fence->seq_passed_actions)) {
426 			fman->seqno_valid = true;
427 			vmw_fence_goal_write(fman->dev_priv,
428 					     fence->base.seqno);
429 			break;
430 		}
431 	}
432 
433 	return true;
434 }
435 
436 
437 /**
438  * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
439  * needed.
440  *
441  * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
442  * considered as a device fence goal.
443  *
444  * This function should be called with the fence manager lock held.
445  * It is typically called when an action has been attached to a fence to
446  * check whether the seqno of that fence should be used for a fence
447  * goal interrupt. This is typically needed if the current fence goal is
448  * invalid, or has a higher seqno than that of the current fence object.
449  *
450  * Returns true if the device goal seqno was updated, false otherwise.
451  */
452 static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
453 {
454 	struct vmw_fence_manager *fman = fman_from_fence(fence);
455 	u32 goal_seqno;
456 
457 	if (dma_fence_is_signaled_locked(&fence->base))
458 		return false;
459 
460 	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
461 	if (likely(fman->seqno_valid &&
462 		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
463 		return false;
464 
465 	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
466 	fman->seqno_valid = true;
467 
468 	return true;
469 }
470 
471 static void __vmw_fences_update(struct vmw_fence_manager *fman)
472 {
473 	struct vmw_fence_obj *fence, *next_fence;
474 	struct list_head action_list;
475 	bool needs_rerun;
476 	uint32_t seqno, new_seqno;
477 
478 	seqno = vmw_fence_read(fman->dev_priv);
479 rerun:
480 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
481 		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
482 			list_del_init(&fence->head);
483 			dma_fence_signal_locked(&fence->base);
484 			INIT_LIST_HEAD(&action_list);
485 			list_splice_init(&fence->seq_passed_actions,
486 					 &action_list);
487 			vmw_fences_perform_actions(fman, &action_list);
488 		} else
489 			break;
490 	}
491 
492 	/*
493 	 * Rerun if the fence goal seqno was updated, and the
494 	 * hardware might have raced with that update, so that
495 	 * we missed a fence_goal irq.
496 	 */
497 
498 	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
499 	if (unlikely(needs_rerun)) {
500 		new_seqno = vmw_fence_read(fman->dev_priv);
501 		if (new_seqno != seqno) {
502 			seqno = new_seqno;
503 			goto rerun;
504 		}
505 	}
506 
507 	if (!list_empty(&fman->cleanup_list))
508 		(void) schedule_work(&fman->work);
509 }
510 
511 void vmw_fences_update(struct vmw_fence_manager *fman)
512 {
513 	spin_lock(&fman->lock);
514 	__vmw_fences_update(fman);
515 	spin_unlock(&fman->lock);
516 }
517 
518 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
519 {
520 	struct vmw_fence_manager *fman = fman_from_fence(fence);
521 
522 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
523 		return true;
524 
525 	vmw_fences_update(fman);
526 
527 	return dma_fence_is_signaled(&fence->base);
528 }
529 
530 int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
531 		       bool interruptible, unsigned long timeout)
532 {
533 	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
534 
535 	if (likely(ret > 0))
536 		return 0;
537 	else if (ret == 0)
538 		return -EBUSY;
539 	else
540 		return ret;
541 }
542 
543 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
544 {
545 	dma_fence_free(&fence->base);
546 }
547 
548 int vmw_fence_create(struct vmw_fence_manager *fman,
549 		     uint32_t seqno,
550 		     struct vmw_fence_obj **p_fence)
551 {
552 	struct vmw_fence_obj *fence;
553 	int ret;
554 
555 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
556 	if (unlikely(!fence))
557 		return -ENOMEM;
558 
559 	ret = vmw_fence_obj_init(fman, fence, seqno,
560 				 vmw_fence_destroy);
561 	if (unlikely(ret != 0))
562 		goto out_err_init;
563 
564 	*p_fence = fence;
565 	return 0;
566 
567 out_err_init:
568 	kfree(fence);
569 	return ret;
570 }
571 
572 
573 static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
574 {
575 	struct vmw_user_fence *ufence =
576 		container_of(fence, struct vmw_user_fence, fence);
577 	struct vmw_fence_manager *fman = fman_from_fence(fence);
578 
579 	ttm_base_object_kfree(ufence, base);
580 	/*
581 	 * Free kernel space accounting.
582 	 */
583 	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
584 			    fman->user_fence_size);
585 }
586 
587 static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
588 {
589 	struct ttm_base_object *base = *p_base;
590 	struct vmw_user_fence *ufence =
591 		container_of(base, struct vmw_user_fence, base);
592 	struct vmw_fence_obj *fence = &ufence->fence;
593 
594 	*p_base = NULL;
595 	vmw_fence_obj_unreference(&fence);
596 }
597 
598 int vmw_user_fence_create(struct drm_file *file_priv,
599 			  struct vmw_fence_manager *fman,
600 			  uint32_t seqno,
601 			  struct vmw_fence_obj **p_fence,
602 			  uint32_t *p_handle)
603 {
604 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
605 	struct vmw_user_fence *ufence;
606 	struct vmw_fence_obj *tmp;
607 	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
608 	struct ttm_operation_ctx ctx = {
609 		.interruptible = false,
610 		.no_wait_gpu = false
611 	};
612 	int ret;
613 
614 	/*
615 	 * Kernel memory space accounting, since this object may
616 	 * be created by a user-space request.
617 	 */
618 
619 	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
620 				   &ctx);
621 	if (unlikely(ret != 0))
622 		return ret;
623 
624 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
625 	if (unlikely(!ufence)) {
626 		ret = -ENOMEM;
627 		goto out_no_object;
628 	}
629 
630 	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
631 				 vmw_user_fence_destroy);
632 	if (unlikely(ret != 0)) {
633 		kfree(ufence);
634 		goto out_no_object;
635 	}
636 
637 	/*
638 	 * The base object holds a reference which is freed in
639 	 * vmw_user_fence_base_release.
640 	 */
641 	tmp = vmw_fence_obj_reference(&ufence->fence);
642 	ret = ttm_base_object_init(tfile, &ufence->base, false,
643 				   VMW_RES_FENCE,
644 				   &vmw_user_fence_base_release, NULL);
645 
646 
647 	if (unlikely(ret != 0)) {
648 		/*
649 		 * Free the base object's reference
650 		 */
651 		vmw_fence_obj_unreference(&tmp);
652 		goto out_err;
653 	}
654 
655 	*p_fence = &ufence->fence;
656 	*p_handle = ufence->base.handle;
657 
658 	return 0;
659 out_err:
660 	tmp = &ufence->fence;
661 	vmw_fence_obj_unreference(&tmp);
662 out_no_object:
663 	ttm_mem_global_free(mem_glob, fman->user_fence_size);
664 	return ret;
665 }
666 
667 
668 /**
669  * vmw_wait_dma_fence - Wait for a dma fence
670  *
671  * @fman: pointer to a fence manager
672  * @fence: DMA fence to wait on
673  *
674  * This function handles the case when the fence is actually a fence
675  * array. If that's the case, it'll wait on each of the child fences.
676  */
677 int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
678 		       struct dma_fence *fence)
679 {
680 	struct dma_fence_array *fence_array;
681 	int ret = 0;
682 	int i;
683 
684 
685 	if (dma_fence_is_signaled(fence))
686 		return 0;
687 
688 	if (!dma_fence_is_array(fence))
689 		return dma_fence_wait(fence, true);
690 
691 	/* From i915: Note that if the fence-array was created in
692 	 * signal-on-any mode, we should *not* decompose it into its individual
693 	 * fences. However, we don't currently store which mode the fence-array
694 	 * is operating in. Fortunately, the only user of signal-on-any is
695 	 * private to amdgpu and we should not see any incoming fence-array
696 	 * from sync-file being in signal-on-any mode.
697 	 */
698 
699 	fence_array = to_dma_fence_array(fence);
700 	for (i = 0; i < fence_array->num_fences; i++) {
701 		struct dma_fence *child = fence_array->fences[i];
702 
703 		ret = dma_fence_wait(child, true);
704 
705 		if (ret < 0)
706 			return ret;
707 	}
708 
709 	return 0;
710 }
711 
712 
713 /*
714  * vmw_fence_fifo_down - signal all unsignaled fence objects.
715  */
716 
717 void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
718 {
719 	struct list_head action_list;
720 	int ret;
721 
722 	/*
723 	 * The list may be altered while we traverse it, so always
724 	 * restart when we've released the fman->lock.
725 	 */
726 
727 	spin_lock(&fman->lock);
728 	fman->fifo_down = true;
729 	while (!list_empty(&fman->fence_list)) {
730 		struct vmw_fence_obj *fence =
731 			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
732 				   head);
733 		dma_fence_get(&fence->base);
734 		spin_unlock(&fman->lock);
735 
736 		ret = vmw_fence_obj_wait(fence, false, false,
737 					 VMW_FENCE_WAIT_TIMEOUT);
738 
739 		if (unlikely(ret != 0)) {
740 			list_del_init(&fence->head);
741 			dma_fence_signal(&fence->base);
742 			INIT_LIST_HEAD(&action_list);
743 			list_splice_init(&fence->seq_passed_actions,
744 					 &action_list);
745 			vmw_fences_perform_actions(fman, &action_list);
746 		}
747 
748 		BUG_ON(!list_empty(&fence->head));
749 		dma_fence_put(&fence->base);
750 		spin_lock(&fman->lock);
751 	}
752 	spin_unlock(&fman->lock);
753 }
754 
755 void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
756 {
757 	spin_lock(&fman->lock);
758 	fman->fifo_down = false;
759 	spin_unlock(&fman->lock);
760 }
761 
762 
763 /**
764  * vmw_fence_obj_lookup - Look up a user-space fence object
765  *
766  * @tfile: A struct ttm_object_file identifying the caller.
767  * @handle: A handle identifying the fence object.
768  * @return: A struct vmw_user_fence base ttm object on success or
769  * an error pointer on failure.
770  *
771  * The fence object is looked up and type-checked. The caller needs
772  * to have opened the fence object first, but since that happens on
773  * creation and fence objects aren't shareable, that's not an
774  * issue currently.
775  */
776 static struct ttm_base_object *
777 vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
778 {
779 	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
780 
781 	if (!base) {
782 		pr_err("Invalid fence object handle 0x%08lx.\n",
783 		       (unsigned long)handle);
784 		return ERR_PTR(-EINVAL);
785 	}
786 
787 	if (base->refcount_release != vmw_user_fence_base_release) {
788 		pr_err("Invalid fence object handle 0x%08lx.\n",
789 		       (unsigned long)handle);
790 		ttm_base_object_unref(&base);
791 		return ERR_PTR(-EINVAL);
792 	}
793 
794 	return base;
795 }
796 
797 
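/*
 * vmw_fence_obj_wait_ioctl - Wait for a user-space fence object to signal.
 *
 * The wait deadline is computed from @arg->timeout_us and cached in
 * @arg->kernel_cookie on the first invocation, so a wait that is restarted
 * by a signal does not have its deadline extended.
 */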
798 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
799 			     struct drm_file *file_priv)
800 {
801 	struct drm_vmw_fence_wait_arg *arg =
802 	    (struct drm_vmw_fence_wait_arg *)data;
803 	unsigned long timeout;
804 	struct ttm_base_object *base;
805 	struct vmw_fence_obj *fence;
806 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
807 	int ret;
808 	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
809 
810 	/*
811 	 * 64-bit division not present on 32-bit systems, so do an
812 	 * approximation. (Divide by 1000000).
813 	 */
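	/*
	 * The shift expression below evaluates to
	 * x * (2^-20 + 2^-24 - 2^-26) ~= x * 0.9984e-6, i.e. within about
	 * 0.2% of an exact division by 10^6.
	 */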
814 
815 	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
816 	  (wait_timeout >> 26);
817 
818 	if (!arg->cookie_valid) {
819 		arg->cookie_valid = 1;
820 		arg->kernel_cookie = jiffies + wait_timeout;
821 	}
822 
823 	base = vmw_fence_obj_lookup(tfile, arg->handle);
824 	if (IS_ERR(base))
825 		return PTR_ERR(base);
826 
827 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
828 
829 	timeout = jiffies;
830 	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
831 		ret = ((vmw_fence_obj_signaled(fence)) ?
832 		       0 : -EBUSY);
833 		goto out;
834 	}
835 
836 	timeout = (unsigned long)arg->kernel_cookie - timeout;
837 
838 	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
839 
840 out:
841 	ttm_base_object_unref(&base);
842 
843 	/*
844 	 * Optionally unref the fence object.
845 	 */
846 
847 	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
848 		return ttm_ref_object_base_unref(tfile, arg->handle,
849 						 TTM_REF_USAGE);
850 	return ret;
851 }
852 
853 int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
854 				 struct drm_file *file_priv)
855 {
856 	struct drm_vmw_fence_signaled_arg *arg =
857 		(struct drm_vmw_fence_signaled_arg *) data;
858 	struct ttm_base_object *base;
859 	struct vmw_fence_obj *fence;
860 	struct vmw_fence_manager *fman;
861 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
862 	struct vmw_private *dev_priv = vmw_priv(dev);
863 
864 	base = vmw_fence_obj_lookup(tfile, arg->handle);
865 	if (IS_ERR(base))
866 		return PTR_ERR(base);
867 
868 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
869 	fman = fman_from_fence(fence);
870 
871 	arg->signaled = vmw_fence_obj_signaled(fence);
872 
873 	arg->signaled_flags = arg->flags;
874 	spin_lock(&fman->lock);
875 	arg->passed_seqno = dev_priv->last_read_seqno;
876 	spin_unlock(&fman->lock);
877 
878 	ttm_base_object_unref(&base);
879 
880 	return 0;
881 }
882 
883 
884 int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
885 			      struct drm_file *file_priv)
886 {
887 	struct drm_vmw_fence_arg *arg =
888 		(struct drm_vmw_fence_arg *) data;
889 
890 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
891 					 arg->handle,
892 					 TTM_REF_USAGE);
893 }
894 
895 /**
896  * vmw_event_fence_action_seq_passed
897  *
898  * @action: The struct vmw_fence_action embedded in a struct
899  * vmw_event_fence_action.
900  *
901  * This function is called when the seqno of the fence where @action is
902  * attached has passed. It queues the event on the submitter's event list.
903  * This function is always called from atomic context.
904  */
905 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
906 {
907 	struct vmw_event_fence_action *eaction =
908 		container_of(action, struct vmw_event_fence_action, action);
909 	struct drm_device *dev = eaction->dev;
910 	struct drm_pending_event *event = eaction->event;
911 
912 	if (unlikely(event == NULL))
913 		return;
914 
915 	spin_lock_irq(&dev->event_lock);
916 
917 	if (likely(eaction->tv_sec != NULL)) {
918 		struct timespec64 ts;
919 
920 		ktime_get_ts64(&ts);
921 		/* monotonic time, so no y2038 overflow */
922 		*eaction->tv_sec = ts.tv_sec;
923 		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
924 	}
925 
926 	drm_send_event_locked(dev, eaction->event);
927 	eaction->event = NULL;
928 	spin_unlock_irq(&dev->event_lock);
929 }
930 
931 /**
932  * vmw_event_fence_action_cleanup
933  *
934  * @action: The struct vmw_fence_action embedded in a struct
935  * vmw_event_fence_action.
936  *
937  * This function is the struct vmw_fence_action destructor. It's typically
938  * called from a workqueue.
939  */
940 static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
941 {
942 	struct vmw_event_fence_action *eaction =
943 		container_of(action, struct vmw_event_fence_action, action);
944 
945 	vmw_fence_obj_unreference(&eaction->fence);
946 	kfree(eaction);
947 }
948 
949 
950 /**
951  * vmw_fence_obj_add_action - Add an action to a fence object.
952  *
953  * @fence: The fence object.
954  * @action: The action to add.
955  *
956  * Note that the action callbacks may be executed before this function
957  * returns.
958  */
959 static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
960 			      struct vmw_fence_action *action)
961 {
962 	struct vmw_fence_manager *fman = fman_from_fence(fence);
963 	bool run_update = false;
964 
965 	mutex_lock(&fman->goal_irq_mutex);
966 	spin_lock(&fman->lock);
967 
968 	fman->pending_actions[action->type]++;
969 	if (dma_fence_is_signaled_locked(&fence->base)) {
970 		struct list_head action_list;
971 
972 		INIT_LIST_HEAD(&action_list);
973 		list_add_tail(&action->head, &action_list);
974 		vmw_fences_perform_actions(fman, &action_list);
975 	} else {
976 		list_add_tail(&action->head, &fence->seq_passed_actions);
977 
978 		/*
979 		 * This function may set fman::seqno_valid, so it must
980 		 * be run with the goal_irq_mutex held.
981 		 */
982 		run_update = vmw_fence_goal_check_locked(fence);
983 	}
984 
985 	spin_unlock(&fman->lock);
986 
987 	if (run_update) {
988 		if (!fman->goal_irq_on) {
989 			fman->goal_irq_on = true;
990 			vmw_goal_waiter_add(fman->dev_priv);
991 		}
992 		vmw_fences_update(fman);
993 	}
994 	mutex_unlock(&fman->goal_irq_mutex);
995 
996 }
997 
998 /**
999  * vmw_event_fence_action_queue - Post an event for sending when a fence
1000  * object seqno has passed.
1001  *
1002  * @file_priv: The file connection on which the event should be posted.
1003  * @fence: The fence object on which to post the event.
1004  * @event: Event to be posted. This event should have been allocated
1005  * using k[mz]alloc, and should have been completely initialized.
1006  * @tv_sec: If non-null, the variable pointed to will be assigned the
1007  * tv_sec value of the current time when the fence signals.
1008  * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will
1009  * be assigned the tv_usec value of the current time when the fence signals.
1010  * @interruptible: Interruptible waits if possible.
1011  *
1012  * As a side effect, the object pointed to by @event may have been
1013  * freed when this function returns. If this function returns with
1014  * an error code, the caller needs to free that object.
1015  */
1016 
1017 int vmw_event_fence_action_queue(struct drm_file *file_priv,
1018 				 struct vmw_fence_obj *fence,
1019 				 struct drm_pending_event *event,
1020 				 uint32_t *tv_sec,
1021 				 uint32_t *tv_usec,
1022 				 bool interruptible)
1023 {
1024 	struct vmw_event_fence_action *eaction;
1025 	struct vmw_fence_manager *fman = fman_from_fence(fence);
1026 
1027 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
1028 	if (unlikely(!eaction))
1029 		return -ENOMEM;
1030 
1031 	eaction->event = event;
1032 
1033 	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
1034 	eaction->action.cleanup = vmw_event_fence_action_cleanup;
1035 	eaction->action.type = VMW_ACTION_EVENT;
1036 
1037 	eaction->fence = vmw_fence_obj_reference(fence);
1038 	eaction->dev = &fman->dev_priv->drm;
1039 	eaction->tv_sec = tv_sec;
1040 	eaction->tv_usec = tv_usec;
1041 
1042 	vmw_fence_obj_add_action(fence, &eaction->action);
1043 
1044 	return 0;
1045 }
1046 
1047 struct vmw_event_fence_pending {
1048 	struct drm_pending_event base;
1049 	struct drm_vmw_event_fence event;
1050 };
1051 
1052 static int vmw_event_fence_action_create(struct drm_file *file_priv,
1053 				  struct vmw_fence_obj *fence,
1054 				  uint32_t flags,
1055 				  uint64_t user_data,
1056 				  bool interruptible)
1057 {
1058 	struct vmw_event_fence_pending *event;
1059 	struct vmw_fence_manager *fman = fman_from_fence(fence);
1060 	struct drm_device *dev = &fman->dev_priv->drm;
1061 	int ret;
1062 
1063 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1064 	if (unlikely(!event)) {
1065 		DRM_ERROR("Failed to allocate an event.\n");
1066 		ret = -ENOMEM;
1067 		goto out_no_space;
1068 	}
1069 
1070 	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
1071 	event->event.base.length = sizeof(*event);
1072 	event->event.user_data = user_data;
1073 
1074 	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
1075 
1076 	if (unlikely(ret != 0)) {
1077 		DRM_ERROR("Failed to allocate event space for this file.\n");
1078 		kfree(event);
1079 		goto out_no_space;
1080 	}
1081 
1082 	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
1083 		ret = vmw_event_fence_action_queue(file_priv, fence,
1084 						   &event->base,
1085 						   &event->event.tv_sec,
1086 						   &event->event.tv_usec,
1087 						   interruptible);
1088 	else
1089 		ret = vmw_event_fence_action_queue(file_priv, fence,
1090 						   &event->base,
1091 						   NULL,
1092 						   NULL,
1093 						   interruptible);
1094 	if (ret != 0)
1095 		goto out_no_queue;
1096 
1097 	return 0;
1098 
1099 out_no_queue:
1100 	drm_event_cancel_free(dev, &event->base);
1101 out_no_space:
1102 	return ret;
1103 }
1104 
1105 int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1106 			  struct drm_file *file_priv)
1107 {
1108 	struct vmw_private *dev_priv = vmw_priv(dev);
1109 	struct drm_vmw_fence_event_arg *arg =
1110 		(struct drm_vmw_fence_event_arg *) data;
1111 	struct vmw_fence_obj *fence = NULL;
1112 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1113 	struct ttm_object_file *tfile = vmw_fp->tfile;
1114 	struct drm_vmw_fence_rep __user *user_fence_rep =
1115 		(struct drm_vmw_fence_rep __user *)(unsigned long)
1116 		arg->fence_rep;
1117 	uint32_t handle;
1118 	int ret;
1119 
1120 	/*
1121 	 * Look up an existing fence object,
1122 	 * and if user-space wants a new reference,
1123 	 * add one.
1124 	 */
1125 	if (arg->handle) {
1126 		struct ttm_base_object *base =
1127 			vmw_fence_obj_lookup(tfile, arg->handle);
1128 
1129 		if (IS_ERR(base))
1130 			return PTR_ERR(base);
1131 
1132 		fence = &(container_of(base, struct vmw_user_fence,
1133 				       base)->fence);
1134 		(void) vmw_fence_obj_reference(fence);
1135 
1136 		if (user_fence_rep != NULL) {
1137 			ret = ttm_ref_object_add(vmw_fp->tfile, base,
1138 						 TTM_REF_USAGE, NULL, false);
1139 			if (unlikely(ret != 0)) {
1140 				DRM_ERROR("Failed to reference a fence "
1141 					  "object.\n");
1142 				goto out_no_ref_obj;
1143 			}
1144 			handle = base->handle;
1145 		}
1146 		ttm_base_object_unref(&base);
1147 	}
1148 
1149 	/*
1150 	 * Create a new fence object.
1151 	 */
1152 	if (!fence) {
1153 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1154 						 &fence,
1155 						 (user_fence_rep) ?
1156 						 &handle : NULL);
1157 		if (unlikely(ret != 0)) {
1158 			DRM_ERROR("Fence event failed to create fence.\n");
1159 			return ret;
1160 		}
1161 	}
1162 
1163 	BUG_ON(fence == NULL);
1164 
1165 	ret = vmw_event_fence_action_create(file_priv, fence,
1166 					    arg->flags,
1167 					    arg->user_data,
1168 					    true);
1169 	if (unlikely(ret != 0)) {
1170 		if (ret != -ERESTARTSYS)
1171 			DRM_ERROR("Failed to attach event to fence.\n");
1172 		goto out_no_create;
1173 	}
1174 
1175 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
1176 				    handle, -1);
1177 	vmw_fence_obj_unreference(&fence);
1178 	return 0;
1179 out_no_create:
1180 	if (user_fence_rep != NULL)
1181 		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1182 out_no_ref_obj:
1183 	vmw_fence_obj_unreference(&fence);
1184 	return ret;
1185 }
1186