/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_api.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
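
/*
 * Sizing sketch (not authoritative; assumes sizeof(SVGACBHeader) is no
 * larger than VMW_CMDBUF_INLINE_ALIGN): ALIGN() then rounds the header up
 * to 64 bytes, so VMW_CMDBUF_INLINE_SIZE works out to 1024 - 64 = 960
 * bytes of inline command space. Each struct vmw_cmdbuf_dheader is then
 * 1024 bytes, and exactly four of them tile a 4096-byte page, satisfying
 * the "page size is a multiple of the DMA pool allocation size" goal
 * stated above.
 */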

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	struct tasklet_struct tasklet;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))
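
/*
 * Usage sketch (illustrative only): iterate over all contexts, as
 * vmw_cmdbuf_man_process() and vmw_cmdbuf_man_create() below do.
 * Callers touching the queues must hold @lock.
 *
 *	struct vmw_cmdbuf_context *ctx;
 *	int i;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		vmw_cmdbuf_ctx_init(ctx);
 */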

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);


/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}
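
/*
 * Illustration (hypothetical numbers): a header at DMA address
 * 0x123456000 submitted to SVGA_CB_CONTEXT_0 would be handed over as
 *
 *	SVGA_REG_COMMAND_HIGH = 0x00000001
 *	SVGA_REG_COMMAND_LOW  = 0x23456000 | SVGA_CB_CONTEXT_0
 *
 * The context id is OR'ed into the low address bits, so the header's
 * 64-byte DMA pool alignment is relied upon to keep the bits covered by
 * SVGA_CB_CONTEXT_MASK clear of payload address bits.
 */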

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	      !list_empty(&ctx->submitted)) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers queued
 * for submission after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers, typically freeing them, but taking appropriate action on
 * preemption or error. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			list_add(&entry->list, &ctx->preempted);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 * handler implemented as a tasklet.
 *
 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 * an unsigned long.
 *
 * The bottom half (tasklet) of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart = false;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/* Send a new fence in case one was removed */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock_bh(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock_bh(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible when waiting.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
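
/*
 * Usage sketch (illustrative only): wait up to ten seconds for the device
 * to drain all queued command buffers, as the teardown paths below do.
 *
 *	if (vmw_cmdbuf_idle(man, false, 10*HZ))
 *		DRM_ERROR("Timed out waiting for command buffer idle.\n");
 */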

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded, either now or during an earlier call.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock_bh(&man->lock);
	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
					 0, 0,
					 DRM_MM_SEARCH_DEFAULT,
					 DRM_MM_CREATE_DEFAULT);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node_generic(&man->mm, info->node,
						 info->page_size, 0, 0,
						 DRM_MM_SEARCH_DEFAULT,
						 DRM_MM_CREATE_DEFAULT);
	}

	spin_unlock_bh(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time is allowed to wait for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
					   &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	memset(cb_hdr, 0, sizeof(*cb_hdr));
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock_bh(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock_bh(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
				 &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	memset(dheader, 0, sizeof(*dheader));
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
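
/*
 * Usage sketch (illustrative only; error handling elided): allocate a
 * dedicated buffer, reserve it, fill it, and submit it.
 * vmw_cmdbuf_send_device_command() below follows a similar pattern for
 * the device context.
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, header);
 *	memcpy(cmd, commands, size);
 *	vmw_cmdbuf_commit(man, size, header, true);
 */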

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
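
/*
 * Usage sketch (illustrative only): the common small-submission path uses
 * the manager's current buffer by passing a NULL header.
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... write @size bytes of commands to @cmd ...
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */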

/**
 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
 *
 * @man: The command buffer manager.
 */
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
	if (!man)
		return;

	tasklet_schedule(&man->tasklet);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock_bh(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock_bh(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}
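
/*
 * Usage sketch (hypothetical pool size): once rudimentary command
 * submission and the MOB manager are up, a driver init path might enable
 * the large pool with something like
 *
 *	ret = vmw_cmdbuf_set_pool_size(man, 2*1024*1024,
 *				       VMW_CMDBUF_INLINE_SIZE);
 *	if (ret)
 *		DRM_ERROR("Failed to set command buffer pool size.\n");
 */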

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
		     (unsigned long) man);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer context 0.\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}
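
/*
 * Lifecycle sketch (illustrative only): the expected call order over a
 * manager's lifetime is
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);  // inline submissions only
 *	vmw_cmdbuf_set_pool_size(man, ...);     // enable large submissions
 *	...
 *	vmw_cmdbuf_remove_pool(man);            // before MOB teardown
 *	vmw_cmdbuf_man_destroy(man);
 */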

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (vmw_cmdbuf_startstop(man, false))
		DRM_ERROR("Failed stopping command buffer context 0.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	tasklet_kill(&man->tasklet);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	kfree(man);
}