// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/dmapool.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Chosen so that the page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
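
/*
 * Illustrative sketch (not part of the driver): with the definitions above,
 * a struct vmw_cmdbuf_dheader, defined further down, occupies
 *
 *	ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN) +
 *		VMW_CMDBUF_INLINE_SIZE == 1024
 *
 * bytes, so a 4096-byte page holds exactly four pool allocations and no
 * allocation straddles a page boundary. A hypothetical compile-time check
 * of that assumption could read:
 *
 *	BUILD_BUG_ON(PAGE_SIZE % 1024 != 0);
 */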

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block submission of new command buffers to
 * hardware on this context, for example during error recovery.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))
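
/*
 * Illustrative sketch (not part of the driver): walking the enabled
 * contexts to count buffers awaiting submission. As for the real users of
 * the macro below, @man->lock must be held while the queues are inspected.
 *
 *	struct vmw_cmdbuf_context *ctx;
 *	struct vmw_cmdbuf_header *pos;
 *	u32 i, queued = 0;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		list_for_each_entry(pos, &ctx->submitted, list)
 *			queued++;
 */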

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with @man->lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers left to
 * submit after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */

		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_fifo_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly while waiting for the
 * cur_mutex.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
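
/*
 * Illustrative sketch (not part of the driver): a caller that needs the
 * device quiescent, for example before tearing down resources, might wait
 * interruptibly with a made-up three-second timeout:
 *
 *	int ret = vmw_cmdbuf_idle(man, true, 3 * HZ);
 *
 *	if (ret == -EBUSY)
 *		DRM_ERROR("Command buffer manager failed to idle.\n");
 */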

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and the allocated mm
 * node on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded, and false if the caller needs to wait and retry.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
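
/*
 * Illustrative sketch (not part of the driver): pairing vmw_cmdbuf_alloc()
 * with vmw_cmdbuf_reserve() and vmw_cmdbuf_commit(), both defined below.
 * The 512-byte size is made up for the example.
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, 512, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, 512, SVGA3D_INVALID_ID, true, header);
 *	... write at most 512 bytes of device commands at cmd ...
 *	vmw_cmdbuf_commit(man, 512, header, true);
 */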

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
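
/*
 * Illustrative sketch (not part of the driver): small kernel submissions
 * normally go through the manager's current buffer by passing a NULL
 * header instead. The 64-byte size is made up for the example.
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, 64, SVGA3D_INVALID_ID, true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... write at most 64 bytes of device commands at cmd ...
 *	vmw_cmdbuf_commit(man, 64, NULL, false);
 */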


/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to preempt.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}


/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start / stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob) {
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	}

	return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}
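
/*
 * Illustrative sketch (not part of the driver): the expected life cycle of
 * a command buffer manager, as wired up from the device init and teardown
 * paths. The pool and default sizes are made up for the example.
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024, 32 * 1024);
 *	...
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */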