1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2008,2010 Intel Corporation
5 */
6
7 #include <linux/intel-iommu.h>
8 #include <linux/dma-resv.h>
9 #include <linux/sync_file.h>
10 #include <linux/uaccess.h>
11
12 #include <drm/drm_syncobj.h>
13
14 #include "display/intel_frontbuffer.h"
15
16 #include "gem/i915_gem_ioctls.h"
17 #include "gt/intel_context.h"
18 #include "gt/intel_gt.h"
19 #include "gt/intel_gt_buffer_pool.h"
20 #include "gt/intel_gt_pm.h"
21 #include "gt/intel_ring.h"
22
23 #include "i915_drv.h"
24 #include "i915_gem_clflush.h"
25 #include "i915_gem_context.h"
26 #include "i915_gem_ioctls.h"
27 #include "i915_trace.h"
28 #include "i915_user_extensions.h"
29
30 struct eb_vma {
31 struct i915_vma *vma;
32 unsigned int flags;
33
34 /** This vma's place in the execbuf reservation list */
35 struct drm_i915_gem_exec_object2 *exec;
36 struct list_head bind_link;
37 struct list_head reloc_link;
38
39 struct hlist_node node;
40 u32 handle;
41 };
42
43 enum {
44 FORCE_CPU_RELOC = 1,
45 FORCE_GTT_RELOC,
46 FORCE_GPU_RELOC,
47 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
48 };
49
50 #define __EXEC_OBJECT_HAS_PIN BIT(31)
51 #define __EXEC_OBJECT_HAS_FENCE BIT(30)
52 #define __EXEC_OBJECT_NEEDS_MAP BIT(29)
53 #define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
54 #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
55 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
56
57 #define __EXEC_HAS_RELOC BIT(31)
58 #define __EXEC_ENGINE_PINNED BIT(30)
59 #define __EXEC_INTERNAL_FLAGS (~0u << 30)
60 #define UPDATE PIN_OFFSET_FIXED
61
62 #define BATCH_OFFSET_BIAS (256*1024)
63
64 #define __I915_EXEC_ILLEGAL_FLAGS \
65 (__I915_EXEC_UNKNOWN_FLAGS | \
66 I915_EXEC_CONSTANTS_MASK | \
67 I915_EXEC_RESOURCE_STREAMER)
68
69 /* Catch emission of unexpected errors for CI! */
70 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
71 #undef EINVAL
72 #define EINVAL ({ \
73 DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
74 22; \
75 })
76 #endif
77
78 /**
79 * DOC: User command execution
80 *
81 * Userspace submits commands to be executed on the GPU as an instruction
82 * stream within a GEM object we call a batchbuffer. These instructions may
83 * refer to other GEM objects containing auxiliary state such as kernels,
84 * samplers, render targets and even secondary batchbuffers. Userspace does
85 * not know where in the GPU memory these objects reside and so before the
86 * batchbuffer is passed to the GPU for execution, those addresses in the
87 * batchbuffer and auxiliary objects are updated. This is known as relocation,
88 * or patching. To try and avoid having to relocate each object on the next
89 * execution, userspace is told the location of those objects in this pass,
90 * but this remains just a hint as the kernel may choose a new location for
91 * any object in the future.
92 *
93 * At the level of talking to the hardware, submitting a batchbuffer for the
94 * GPU to execute amounts to adding content to a buffer from which the HW
95 * command streamer is reading.
96 *
97 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
98 * Execlists, this command is not placed on the same buffer as the
99 * remaining items.
100 *
101 * 2. Add a command to invalidate caches to the buffer.
102 *
103 * 3. Add a batchbuffer start command to the buffer; the start command is
104 * essentially a token together with the GPU address of the batchbuffer
105 * to be executed.
106 *
107 * 4. Add a pipeline flush to the buffer.
108 *
109 * 5. Add a memory write command to the buffer to record when the GPU
110 * is done executing the batchbuffer. The memory write writes the
111 * global sequence number of the request, ``i915_request::global_seqno``;
112 * the i915 driver uses the current value in the register to determine
113 * if the GPU has completed the batchbuffer.
114 *
115 * 6. Add a user interrupt command to the buffer. This command instructs
116 * the GPU to issue an interrupt when the command, pipeline flush and
117 * memory write are completed.
118 *
119 * 7. Inform the hardware of the additional commands added to the buffer
120 * (by updating the tail pointer).
121 *
122 * Processing an execbuf ioctl is conceptually split up into a few phases.
123 *
124 * 1. Validation - Ensure all the pointers, handles and flags are valid.
125 * 2. Reservation - Assign GPU address space for every object
126 * 3. Relocation - Update any addresses to point to the final locations
127 * 4. Serialisation - Order the request with respect to its dependencies
128 * 5. Construction - Construct a request to execute the batchbuffer
129 * 6. Submission (at some point in the future execution)
130 *
131 * Reserving resources for the execbuf is the most complicated phase. We
132 * neither want to have to migrate the object in the address space, nor do
133 * we want to have to update any relocations pointing to this object. Ideally,
134 * we want to leave the object where it is and for all the existing relocations
135 * to match. If the object is given a new address, or if userspace thinks the
136 * object is elsewhere, we have to parse all the relocation entries and update
137 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
138 * all the target addresses in all of its objects match the value in the
139 * relocation entries and that they all match the presumed offsets given by the
140 * list of execbuffer objects. Using this knowledge, we know that if we haven't
141 * moved any buffers, all the relocation entries are valid and we can skip
142 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
143 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
144 *
145 * The addresses written in the objects must match the corresponding
146 * reloc.presumed_offset which in turn must match the corresponding
147 * execobject.offset.
148 *
149 * Any render targets written to in the batch must be flagged with
150 * EXEC_OBJECT_WRITE.
151 *
152 * To avoid stalling, execobject.offset should match the current
153 * address of that object within the active context.
154 *
155 * The reservation is done in multiple phases. First we try to keep any
156 * object already bound in its current location - so long as it meets the
157 * constraints imposed by the new execbuffer. Any object left unbound after the
158 * first pass is then fitted into any available idle space. If an object does
159 * not fit, all objects are removed from the reservation and the process rerun
160 * after sorting the objects into a priority order (more difficult to fit
161 * objects are tried first). Failing that, the entire VM is cleared and we try
162 * to fit the execbuf one last time before concluding that it simply will not
163 * fit.
164 *
165 * A small complication to all of this is that we allow userspace not only to
166 * specify an alignment and a size for the object in the address space, but
167 * we also allow userspace to specify the exact offset. These objects are
168 * simpler to place (the location is known a priori); all we have to do is make
169 * sure the space is available.
170 *
171 * Once all the objects are in place, patching up the buried pointers to point
172 * to the final locations is a fairly simple job of walking over the relocation
173 * entry arrays, looking up the right address and rewriting the value into
174 * the object. Simple! ... The relocation entries are stored in user memory
175 * and so to access them we have to copy them into a local buffer. That copy
176 * has to avoid taking any pagefaults as they may lead back to a GEM object
177 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
178 * the relocation into multiple passes. First we try to do everything within an
179 * atomic context (avoid the pagefaults) which requires that we never wait. If
180 * we detect that we may wait, or if we need to fault, then we have to fall back
181 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
182 * bells yet?) Dropping the mutex means that we lose all the state we have
183 * built up so far for the execbuf and we must reset any global data. However,
184 * we do leave the objects pinned in their final locations - which is a
185 * potential issue for concurrent execbufs. Once we have left the mutex, we can
186 * allocate and copy all the relocation entries into a large array at our
187 * leisure, reacquire the mutex, reclaim all the objects and other state and
188 * then proceed to update any incorrect addresses with the objects.
189 *
190 * As we process the relocation entries, we maintain a record of whether the
191 * object is being written to. Using NO_RELOC, we expect userspace to provide
192 * this information instead. We also check whether we can skip the relocation
193 * by comparing the expected value inside the relocation entry with the target's
194 * final address. If they differ, we have to map the current object and rewrite
195 * the 4 or 8 byte pointer within.
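 *
 * For illustration only, a minimal sketch of a single relocation entry as
 * userspace might describe it, using the uapi structures from
 * <drm/i915_drm.h> (the handles and offsets here are hypothetical):
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_handle, // object whose GPU address we need
 *		.offset = 128,			// location in this object to rewrite
 *		.delta = 16,			// added to the target's GPU address
 *		.presumed_offset = last_offset, // our guess at the target's address
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *	};
 *
 * If presumed_offset still matches where the target actually ends up, the
 * rewrite can be skipped entirely; otherwise the value written at 'offset'
 * is the canonical form of target->node.start + delta, 4 or 8 bytes wide
 * depending on the platform.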
196 *
197 * Serialising an execbuf is quite simple according to the rules of the GEM
198 * ABI. Execution within each context is ordered by the order of submission.
199 * Writes to any GEM object are in order of submission and are exclusive. Reads
200 * from a GEM object are unordered with respect to other reads, but ordered by
201 * writes. A write submitted after a read cannot occur before the read, and
202 * similarly any read submitted after a write cannot occur before the write.
203 * Writes are ordered between engines such that only one write occurs at any
204 * time (completing any reads beforehand) - using semaphores where available
205 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
206 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
207 * reads before starting, and any read (either using set-domain or pread) must
208 * flush all GPU writes before starting. (Note we only employ a barrier before,
209 * we currently rely on userspace not concurrently starting a new execution
210 * whilst reading or writing to an object. This may be an advantage or not
211 * depending on how much you trust userspace not to shoot themselves in the
212 * foot.) Serialisation may just result in the request being inserted into
213 * a DAG awaiting its turn, but most simple is to wait on the CPU until
214 * all dependencies are resolved.
215 *
216 * After all of that, it is just a matter of closing the request and handing it to
217 * the hardware (well, leaving it in a queue to be executed). However, we also
218 * offer the ability for batchbuffers to be run with elevated privileges so
219 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
220 * Before any batch is given extra privileges we first must check that it
221 * contains no nefarious instructions: we check that each instruction is from
222 * our whitelist and all registers are also from an allowed list. We first
223 * copy the user's batchbuffer to a shadow (so that the user doesn't have
224 * access to it, either by the CPU or GPU as we scan it) and then parse each
225 * instruction. If everything is ok, we set a flag telling the hardware to run
226 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
227 */
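
/*
 * Illustrative only, not part of the driver: a minimal userspace-side
 * sketch of the I915_EXEC_NO_RELOC flow described above, reusing the
 * offsets reported by a previous execbuf as the presumed offsets. The fd,
 * handles, offsets and lengths are hypothetical; the structures and flags
 * come from the i915 uapi (<drm/i915_drm.h>) and drmIoctl() from libdrm.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = target_handle;		// e.g. a render target
 *	obj[0].offset = last_target_offset;	// offset from the previous run
 *	obj[0].flags = EXEC_OBJECT_WRITE;	// the batch writes to it
 *
 *	obj[1].handle = batch_handle;		// batch buffer goes last by default
 *	obj[1].offset = last_batch_offset;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.batch_len = batch_len;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * If the kernel does have to move an object, the new location is written
 * back into execobject.offset on return, so the next submission can again
 * be made with I915_EXEC_NO_RELOC.
 */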
228
229 struct eb_fence {
230 struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
231 struct dma_fence *dma_fence;
232 u64 value;
233 struct dma_fence_chain *chain_fence;
234 };
235
236 struct i915_execbuffer {
237 struct drm_i915_private *i915; /** i915 backpointer */
238 struct drm_file *file; /** per-file lookup tables and limits */
239 struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
240 struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
241 struct eb_vma *vma;
242
243 struct intel_engine_cs *engine; /** engine to queue the request to */
244 struct intel_context *context; /* logical state for the request */
245 struct i915_gem_context *gem_context; /** caller's context */
246
247 struct i915_request *request; /** our request to build */
248 struct eb_vma *batch; /** identity of the batch obj/vma */
249 struct i915_vma *trampoline; /** trampoline used for chaining */
250
251 /** actual size of execobj[] as we may extend it for the cmdparser */
252 unsigned int buffer_count;
253
254 /** list of vma not yet bound during reservation phase */
255 struct list_head unbound;
256
257 /** list of vma that have execobj.relocation_count */
258 struct list_head relocs;
259
260 struct i915_gem_ww_ctx ww;
261
262 /**
263 * Track the most recently used object for relocations, as we
264 * frequently have to perform multiple relocations within the same
265 * obj/page
266 */
267 struct reloc_cache {
268 struct drm_mm_node node; /** temporary GTT binding */
269 unsigned long vaddr; /** Current kmap address */
270 unsigned long page; /** Currently mapped page index */
271 unsigned int gen; /** Cached value of INTEL_GEN */
272 bool use_64bit_reloc : 1;
273 bool has_llc : 1;
274 bool has_fence : 1;
275 bool needs_unfenced : 1;
276
277 struct i915_request *rq;
278 u32 *rq_cmd;
279 unsigned int rq_size;
280 struct intel_gt_buffer_pool_node *pool;
281 } reloc_cache;
282
283 struct intel_gt_buffer_pool_node *reloc_pool; /** relocation pool for -EDEADLK handling */
284 struct intel_context *reloc_context;
285
286 u64 invalid_flags; /** Set of execobj.flags that are invalid */
287 u32 context_flags; /** Set of execobj.flags to insert from the ctx */
288
289 u64 batch_len; /** Length of batch within object */
290 u32 batch_start_offset; /** Location within object of batch */
291 u32 batch_flags; /** Flags composed for emit_bb_start() */
292 struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
293
294 /**
295 * Indicate either the size of the hashtable used to resolve
296 * relocation handles, or if negative that we are using a direct
297 * index into the execobj[].
298 */
299 int lut_size;
300 struct hlist_head *buckets; /** ht for relocation handles */
301
302 struct eb_fence *fences;
303 unsigned long num_fences;
304 };
305
306 static int eb_parse(struct i915_execbuffer *eb);
307 static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb,
308 bool throttle);
309 static void eb_unpin_engine(struct i915_execbuffer *eb);
310
311 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
312 {
313 return intel_engine_requires_cmd_parser(eb->engine) ||
314 (intel_engine_using_cmd_parser(eb->engine) &&
315 eb->args->batch_len);
316 }
317
318 static int eb_create(struct i915_execbuffer *eb)
319 {
320 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
321 unsigned int size = 1 + ilog2(eb->buffer_count);
322
323 /*
324 * Without a 1:1 association between relocation handles and
325 * the execobject[] index, we instead create a hashtable.
326 * We size it dynamically based on available memory, starting
327 * first with a 1:1 associative hash and scaling back until
328 * the allocation succeeds.
329 *
330 * Later on we use a positive lut_size to indicate we are
331 * using this hashtable, and a negative value to indicate a
332 * direct lookup.
333 */
334 do {
335 gfp_t flags;
336
337 /* While we can still reduce the allocation size, don't
338 * raise a warning and allow the allocation to fail.
339 * On the last pass though, we want to try as hard
340 * as possible to perform the allocation and warn
341 * if it fails.
342 */
343 flags = GFP_KERNEL;
344 if (size > 1)
345 flags |= __GFP_NORETRY | __GFP_NOWARN;
346
347 eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
348 flags);
349 if (eb->buckets)
350 break;
351 } while (--size);
352
353 if (unlikely(!size))
354 return -ENOMEM;
355
356 eb->lut_size = size;
357 } else {
358 eb->lut_size = -eb->buffer_count;
359 }
360
361 return 0;
362 }
363
364 static bool
365 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
366 const struct i915_vma *vma,
367 unsigned int flags)
368 {
369 if (vma->node.size < entry->pad_to_size)
370 return true;
371
372 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
373 return true;
374
375 if (flags & EXEC_OBJECT_PINNED &&
376 vma->node.start != entry->offset)
377 return true;
378
379 if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
380 vma->node.start < BATCH_OFFSET_BIAS)
381 return true;
382
383 if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
384 (vma->node.start + vma->node.size + 4095) >> 32)
385 return true;
386
387 if (flags & __EXEC_OBJECT_NEEDS_MAP &&
388 !i915_vma_is_map_and_fenceable(vma))
389 return true;
390
391 return false;
392 }
393
394 static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
395 unsigned int exec_flags)
396 {
397 u64 pin_flags = 0;
398
399 if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
400 pin_flags |= PIN_GLOBAL;
401
402 /*
403 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
404 * limit address to the first 4GBs for unflagged objects.
405 */
406 if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
407 pin_flags |= PIN_ZONE_4G;
408
409 if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
410 pin_flags |= PIN_MAPPABLE;
411
412 if (exec_flags & EXEC_OBJECT_PINNED)
413 pin_flags |= entry->offset | PIN_OFFSET_FIXED;
414 else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
415 pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
416
417 return pin_flags;
418 }
419
420 static inline bool
421 eb_pin_vma(struct i915_execbuffer *eb,
422 const struct drm_i915_gem_exec_object2 *entry,
423 struct eb_vma *ev)
424 {
425 struct i915_vma *vma = ev->vma;
426 u64 pin_flags;
427
428 if (vma->node.size)
429 pin_flags = vma->node.start;
430 else
431 pin_flags = entry->offset & PIN_OFFSET_MASK;
432
433 pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
434 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
435 pin_flags |= PIN_GLOBAL;
436
437 /* Attempt to reuse the current location if available */
438 /* TODO: Add -EDEADLK handling here */
439 if (unlikely(i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags))) {
440 if (entry->flags & EXEC_OBJECT_PINNED)
441 return false;
442
443 /* Failing that pick any _free_ space if suitable */
444 if (unlikely(i915_vma_pin_ww(vma, &eb->ww,
445 entry->pad_to_size,
446 entry->alignment,
447 eb_pin_flags(entry, ev->flags) |
448 PIN_USER | PIN_NOEVICT)))
449 return false;
450 }
451
452 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
453 if (unlikely(i915_vma_pin_fence(vma))) {
454 i915_vma_unpin(vma);
455 return false;
456 }
457
458 if (vma->fence)
459 ev->flags |= __EXEC_OBJECT_HAS_FENCE;
460 }
461
462 ev->flags |= __EXEC_OBJECT_HAS_PIN;
463 return !eb_vma_misplaced(entry, vma, ev->flags);
464 }
465
466 static inline void
467 eb_unreserve_vma(struct eb_vma *ev)
468 {
469 if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
470 return;
471
472 if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
473 __i915_vma_unpin_fence(ev->vma);
474
475 __i915_vma_unpin(ev->vma);
476 ev->flags &= ~__EXEC_OBJECT_RESERVED;
477 }
478
479 static int
480 eb_validate_vma(struct i915_execbuffer *eb,
481 struct drm_i915_gem_exec_object2 *entry,
482 struct i915_vma *vma)
483 {
484 if (unlikely(entry->flags & eb->invalid_flags))
485 return -EINVAL;
486
487 if (unlikely(entry->alignment &&
488 !is_power_of_2_u64(entry->alignment)))
489 return -EINVAL;
490
491 /*
492 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
493 * any non-page-aligned or non-canonical addresses.
494 */
495 if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
496 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
497 return -EINVAL;
498
499 /* pad_to_size was once a reserved field, so sanitize it */
500 if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
501 if (unlikely(offset_in_page(entry->pad_to_size)))
502 return -EINVAL;
503 } else {
504 entry->pad_to_size = 0;
505 }
506 /*
507 * From drm_mm perspective address space is continuous,
508 * so from this point we're always using non-canonical
509 * form internally.
510 */
511 entry->offset = gen8_noncanonical_addr(entry->offset);
512
513 if (!eb->reloc_cache.has_fence) {
514 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
515 } else {
516 if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
517 eb->reloc_cache.needs_unfenced) &&
518 i915_gem_object_is_tiled(vma->obj))
519 entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
520 }
521
522 if (!(entry->flags & EXEC_OBJECT_PINNED))
523 entry->flags |= eb->context_flags;
524
525 return 0;
526 }
527
528 static void
529 eb_add_vma(struct i915_execbuffer *eb,
530 unsigned int i, unsigned batch_idx,
531 struct i915_vma *vma)
532 {
533 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
534 struct eb_vma *ev = &eb->vma[i];
535
536 GEM_BUG_ON(i915_vma_is_closed(vma));
537
538 ev->vma = vma;
539 ev->exec = entry;
540 ev->flags = entry->flags;
541
542 if (eb->lut_size > 0) {
543 ev->handle = entry->handle;
544 hlist_add_head(&ev->node,
545 &eb->buckets[hash_32(entry->handle,
546 eb->lut_size)]);
547 }
548
549 if (entry->relocation_count)
550 list_add_tail(&ev->reloc_link, &eb->relocs);
551
552 /*
553 * SNA is doing fancy tricks with compressing batch buffers, which leads
554 * to negative relocation deltas. Usually that works out ok since the
555 * relocate address is still positive, except when the batch is placed
556 * very low in the GTT. Ensure this doesn't happen.
557 *
558 * Note that actual hangs have only been observed on gen7, but for
559 * paranoia do it everywhere.
560 */
561 if (i == batch_idx) {
562 if (entry->relocation_count &&
563 !(ev->flags & EXEC_OBJECT_PINNED))
564 ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
565 if (eb->reloc_cache.has_fence)
566 ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
567
568 eb->batch = ev;
569 }
570 }
571
572 static inline int use_cpu_reloc(const struct reloc_cache *cache,
573 const struct drm_i915_gem_object *obj)
574 {
575 if (!i915_gem_object_has_struct_page(obj))
576 return false;
577
578 if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
579 return true;
580
581 if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
582 return false;
583
584 return (cache->has_llc ||
585 obj->cache_dirty ||
586 obj->cache_level != I915_CACHE_NONE);
587 }
588
589 static int eb_reserve_vma(struct i915_execbuffer *eb,
590 struct eb_vma *ev,
591 u64 pin_flags)
592 {
593 struct drm_i915_gem_exec_object2 *entry = ev->exec;
594 struct i915_vma *vma = ev->vma;
595 int err;
596
597 if (drm_mm_node_allocated(&vma->node) &&
598 eb_vma_misplaced(entry, vma, ev->flags)) {
599 err = i915_vma_unbind(vma);
600 if (err)
601 return err;
602 }
603
604 err = i915_vma_pin_ww(vma, &eb->ww,
605 entry->pad_to_size, entry->alignment,
606 eb_pin_flags(entry, ev->flags) | pin_flags);
607 if (err)
608 return err;
609
610 if (entry->offset != vma->node.start) {
611 entry->offset = vma->node.start | UPDATE;
612 eb->args->flags |= __EXEC_HAS_RELOC;
613 }
614
615 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
616 err = i915_vma_pin_fence(vma);
617 if (unlikely(err)) {
618 i915_vma_unpin(vma);
619 return err;
620 }
621
622 if (vma->fence)
623 ev->flags |= __EXEC_OBJECT_HAS_FENCE;
624 }
625
626 ev->flags |= __EXEC_OBJECT_HAS_PIN;
627 GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
628
629 return 0;
630 }
631
632 static int eb_reserve(struct i915_execbuffer *eb)
633 {
634 const unsigned int count = eb->buffer_count;
635 unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
636 struct list_head last;
637 struct eb_vma *ev;
638 unsigned int i, pass;
639 int err = 0;
640
641 /*
642 * Attempt to pin all of the buffers into the GTT.
643 * This is done in 3 phases:
644 *
645 * 1a. Unbind all objects that do not match the GTT constraints for
646 * the execbuffer (fenceable, mappable, alignment etc).
647 * 1b. Increment pin count for already bound objects.
648 * 2. Bind new objects.
649 * 3. Decrement pin count.
650 *
651 * This avoids unnecessary unbinding of later objects in order to make
652 * room for the earlier objects *unless* we need to defragment.
653 */
654 pass = 0;
655 do {
656 list_for_each_entry(ev, &eb->unbound, bind_link) {
657 err = eb_reserve_vma(eb, ev, pin_flags);
658 if (err)
659 break;
660 }
661 if (err != -ENOSPC)
662 return err;
663
664 /* Resort *all* the objects into priority order */
665 INIT_LIST_HEAD(&eb->unbound);
666 INIT_LIST_HEAD(&last);
667 for (i = 0; i < count; i++) {
668 unsigned int flags;
669
670 ev = &eb->vma[i];
671 flags = ev->flags;
672 if (flags & EXEC_OBJECT_PINNED &&
673 flags & __EXEC_OBJECT_HAS_PIN)
674 continue;
675
676 eb_unreserve_vma(ev);
677
678 if (flags & EXEC_OBJECT_PINNED)
679 /* Pinned must have their slot */
680 list_add(&ev->bind_link, &eb->unbound);
681 else if (flags & __EXEC_OBJECT_NEEDS_MAP)
682 /* Map requires the lowest 256MiB (aperture) */
683 list_add_tail(&ev->bind_link, &eb->unbound);
684 else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
685 /* Prioritise 4GiB region for restricted bo */
686 list_add(&ev->bind_link, &last);
687 else
688 list_add_tail(&ev->bind_link, &last);
689 }
690 list_splice_tail(&last, &eb->unbound);
691
692 switch (pass++) {
693 case 0:
694 break;
695
696 case 1:
697 /* Too fragmented, unbind everything and retry */
698 mutex_lock(&eb->context->vm->mutex);
699 err = i915_gem_evict_vm(eb->context->vm);
700 mutex_unlock(&eb->context->vm->mutex);
701 if (err)
702 return err;
703 break;
704
705 default:
706 return -ENOSPC;
707 }
708
709 pin_flags = PIN_USER;
710 } while (1);
711 }
712
713 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
714 {
715 if (eb->args->flags & I915_EXEC_BATCH_FIRST)
716 return 0;
717 else
718 return eb->buffer_count - 1;
719 }
720
721 static int eb_select_context(struct i915_execbuffer *eb)
722 {
723 struct i915_gem_context *ctx;
724
725 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
726 if (unlikely(!ctx))
727 return -ENOENT;
728
729 eb->gem_context = ctx;
730 if (rcu_access_pointer(ctx->vm))
731 eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
732
733 eb->context_flags = 0;
734 if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
735 eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
736
737 return 0;
738 }
739
740 static int __eb_add_lut(struct i915_execbuffer *eb,
741 u32 handle, struct i915_vma *vma)
742 {
743 struct i915_gem_context *ctx = eb->gem_context;
744 struct i915_lut_handle *lut;
745 int err;
746
747 lut = i915_lut_handle_alloc();
748 if (unlikely(!lut))
749 return -ENOMEM;
750
751 i915_vma_get(vma);
752 if (!atomic_fetch_inc(&vma->open_count))
753 i915_vma_reopen(vma);
754 lut->handle = handle;
755 lut->ctx = ctx;
756
757 /* Check that the context hasn't been closed in the meantime */
758 err = -EINTR;
759 if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
760 struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
761
762 if (unlikely(vm && vma->vm != vm))
763 err = -EAGAIN; /* user racing with ctx set-vm */
764 else if (likely(!i915_gem_context_is_closed(ctx)))
765 err = radix_tree_insert(&ctx->handles_vma, handle, vma);
766 else
767 err = -ENOENT;
768 if (err == 0) { /* And nor has this handle */
769 struct drm_i915_gem_object *obj = vma->obj;
770
771 spin_lock(&obj->lut_lock);
772 if (idr_find(&eb->file->object_idr, handle) == obj) {
773 list_add(&lut->obj_link, &obj->lut_list);
774 } else {
775 radix_tree_delete(&ctx->handles_vma, handle);
776 err = -ENOENT;
777 }
778 spin_unlock(&obj->lut_lock);
779 }
780 mutex_unlock(&ctx->lut_mutex);
781 }
782 if (unlikely(err))
783 goto err;
784
785 return 0;
786
787 err:
788 i915_vma_close(vma);
789 i915_vma_put(vma);
790 i915_lut_handle_free(lut);
791 return err;
792 }
793
794 static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
795 {
796 struct i915_address_space *vm = eb->context->vm;
797
798 do {
799 struct drm_i915_gem_object *obj;
800 struct i915_vma *vma;
801 int err;
802
803 rcu_read_lock();
804 vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
805 if (likely(vma && vma->vm == vm))
806 vma = i915_vma_tryget(vma);
807 rcu_read_unlock();
808 if (likely(vma))
809 return vma;
810
811 obj = i915_gem_object_lookup(eb->file, handle);
812 if (unlikely(!obj))
813 return ERR_PTR(-ENOENT);
814
815 vma = i915_vma_instance(obj, vm, NULL);
816 if (IS_ERR(vma)) {
817 i915_gem_object_put(obj);
818 return vma;
819 }
820
821 err = __eb_add_lut(eb, handle, vma);
822 if (likely(!err))
823 return vma;
824
825 i915_gem_object_put(obj);
826 if (err != -EEXIST)
827 return ERR_PTR(err);
828 } while (1);
829 }
830
831 static int eb_lookup_vmas(struct i915_execbuffer *eb)
832 {
833 struct drm_i915_private *i915 = eb->i915;
834 unsigned int batch = eb_batch_index(eb);
835 unsigned int i;
836 int err = 0;
837
838 INIT_LIST_HEAD(&eb->relocs);
839
840 for (i = 0; i < eb->buffer_count; i++) {
841 struct i915_vma *vma;
842
843 vma = eb_lookup_vma(eb, eb->exec[i].handle);
844 if (IS_ERR(vma)) {
845 err = PTR_ERR(vma);
846 goto err;
847 }
848
849 err = eb_validate_vma(eb, &eb->exec[i], vma);
850 if (unlikely(err)) {
851 i915_vma_put(vma);
852 goto err;
853 }
854
855 eb_add_vma(eb, i, batch, vma);
856 }
857
858 if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
859 drm_dbg(&i915->drm,
860 "Attempting to use self-modifying batch buffer\n");
861 return -EINVAL;
862 }
863
864 if (range_overflows_t(u64,
865 eb->batch_start_offset, eb->batch_len,
866 eb->batch->vma->size)) {
867 drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
868 return -EINVAL;
869 }
870
871 if (eb->batch_len == 0)
872 eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
873 if (unlikely(eb->batch_len == 0)) { /* impossible! */
874 drm_dbg(&i915->drm, "Invalid batch length\n");
875 return -EINVAL;
876 }
877
878 return 0;
879
880 err:
881 eb->vma[i].vma = NULL;
882 return err;
883 }
884
885 static int eb_validate_vmas(struct i915_execbuffer *eb)
886 {
887 unsigned int i;
888 int err;
889
890 INIT_LIST_HEAD(&eb->unbound);
891
892 for (i = 0; i < eb->buffer_count; i++) {
893 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
894 struct eb_vma *ev = &eb->vma[i];
895 struct i915_vma *vma = ev->vma;
896
897 err = i915_gem_object_lock(vma->obj, &eb->ww);
898 if (err)
899 return err;
900
901 if (eb_pin_vma(eb, entry, ev)) {
902 if (entry->offset != vma->node.start) {
903 entry->offset = vma->node.start | UPDATE;
904 eb->args->flags |= __EXEC_HAS_RELOC;
905 }
906 } else {
907 eb_unreserve_vma(ev);
908
909 list_add_tail(&ev->bind_link, &eb->unbound);
910 if (drm_mm_node_allocated(&vma->node)) {
911 err = i915_vma_unbind(vma);
912 if (err)
913 return err;
914 }
915 }
916
917 GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
918 eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
919 }
920
921 if (!list_empty(&eb->unbound))
922 return eb_reserve(eb);
923
924 return 0;
925 }
926
927 static struct eb_vma *
928 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
929 {
930 if (eb->lut_size < 0) {
931 if (handle >= -eb->lut_size)
932 return NULL;
933 return &eb->vma[handle];
934 } else {
935 struct hlist_head *head;
936 struct eb_vma *ev;
937
938 head = &eb->buckets[hash_32(handle, eb->lut_size)];
939 hlist_for_each_entry(ev, head, node) {
940 if (ev->handle == handle)
941 return ev;
942 }
943 return NULL;
944 }
945 }
946
947 static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
948 {
949 const unsigned int count = eb->buffer_count;
950 unsigned int i;
951
952 for (i = 0; i < count; i++) {
953 struct eb_vma *ev = &eb->vma[i];
954 struct i915_vma *vma = ev->vma;
955
956 if (!vma)
957 break;
958
959 eb_unreserve_vma(ev);
960
961 if (final)
962 i915_vma_put(vma);
963 }
964
965 eb_unpin_engine(eb);
966 }
967
968 static void eb_destroy(const struct i915_execbuffer *eb)
969 {
970 GEM_BUG_ON(eb->reloc_cache.rq);
971
972 if (eb->lut_size > 0)
973 kfree(eb->buckets);
974 }
975
976 static inline u64
977 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
978 const struct i915_vma *target)
979 {
980 return gen8_canonical_addr((int)reloc->delta + target->node.start);
981 }
982
983 static void reloc_cache_clear(struct reloc_cache *cache)
984 {
985 cache->rq = NULL;
986 cache->rq_cmd = NULL;
987 cache->pool = NULL;
988 cache->rq_size = 0;
989 }
990
991 static void reloc_cache_init(struct reloc_cache *cache,
992 struct drm_i915_private *i915)
993 {
994 cache->page = -1;
995 cache->vaddr = 0;
996 /* Must be a variable in the struct to allow GCC to unroll. */
997 cache->gen = INTEL_GEN(i915);
998 cache->has_llc = HAS_LLC(i915);
999 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
1000 cache->has_fence = cache->gen < 4;
1001 cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
1002 cache->node.flags = 0;
1003 reloc_cache_clear(cache);
1004 }
1005
1006 static inline void *unmask_page(unsigned long p)
1007 {
1008 return (void *)(uintptr_t)(p & PAGE_MASK);
1009 }
1010
1011 static inline unsigned int unmask_flags(unsigned long p)
1012 {
1013 return p & ~PAGE_MASK;
1014 }
1015
1016 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
1017
1018 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
1019 {
1020 struct drm_i915_private *i915 =
1021 container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
1022 return &i915->ggtt;
1023 }
1024
1025 static void reloc_cache_put_pool(struct i915_execbuffer *eb, struct reloc_cache *cache)
1026 {
1027 if (!cache->pool)
1028 return;
1029
1030 /*
1031 * This is a bit nasty, normally we keep objects locked until the end
1032 * of execbuffer, but this one is already submitted, and we have to unlock before
1033 * dropping the reference. Fortunately we can only hold 1 pool node at
1034 * a time, so this should be harmless.
1035 */
1036 i915_gem_ww_unlock_single(cache->pool->obj);
1037 intel_gt_buffer_pool_put(cache->pool);
1038 cache->pool = NULL;
1039 }
1040
1041 static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache)
1042 {
1043 struct drm_i915_gem_object *obj = cache->rq->batch->obj;
1044
1045 GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
1046 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
1047
1048 i915_gem_object_flush_map(obj);
1049 i915_gem_object_unpin_map(obj);
1050
1051 intel_gt_chipset_flush(cache->rq->engine->gt);
1052
1053 i915_request_add(cache->rq);
1054 reloc_cache_put_pool(eb, cache);
1055 reloc_cache_clear(cache);
1056
1057 eb->reloc_pool = NULL;
1058 }
1059
1060 static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
1061 {
1062 void *vaddr;
1063
1064 if (cache->rq)
1065 reloc_gpu_flush(eb, cache);
1066
1067 if (!cache->vaddr)
1068 return;
1069
1070 vaddr = unmask_page(cache->vaddr);
1071 if (cache->vaddr & KMAP) {
1072 struct drm_i915_gem_object *obj =
1073 (struct drm_i915_gem_object *)cache->node.mm;
1074 if (cache->vaddr & CLFLUSH_AFTER)
1075 mb();
1076
1077 kunmap_atomic(vaddr);
1078 i915_gem_object_finish_access(obj);
1079 } else {
1080 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1081
1082 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1083 io_mapping_unmap_atomic((void __iomem *)vaddr);
1084
1085 if (drm_mm_node_allocated(&cache->node)) {
1086 ggtt->vm.clear_range(&ggtt->vm,
1087 cache->node.start,
1088 cache->node.size);
1089 mutex_lock(&ggtt->vm.mutex);
1090 drm_mm_remove_node(&cache->node);
1091 mutex_unlock(&ggtt->vm.mutex);
1092 } else {
1093 i915_vma_unpin((struct i915_vma *)cache->node.mm);
1094 }
1095 }
1096
1097 cache->vaddr = 0;
1098 cache->page = -1;
1099 }
1100
1101 static void *reloc_kmap(struct drm_i915_gem_object *obj,
1102 struct reloc_cache *cache,
1103 unsigned long pageno)
1104 {
1105 void *vaddr;
1106 struct page *page;
1107
1108 if (cache->vaddr) {
1109 kunmap_atomic(unmask_page(cache->vaddr));
1110 } else {
1111 unsigned int flushes;
1112 int err;
1113
1114 err = i915_gem_object_prepare_write(obj, &flushes);
1115 if (err)
1116 return ERR_PTR(err);
1117
1118 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
1119 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
1120
1121 cache->vaddr = flushes | KMAP;
1122 cache->node.mm = (void *)obj;
1123 if (flushes)
1124 mb();
1125 }
1126
1127 page = i915_gem_object_get_page(obj, pageno);
1128 if (!obj->mm.dirty)
1129 set_page_dirty(page);
1130
1131 vaddr = kmap_atomic(page);
1132 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
1133 cache->page = pageno;
1134
1135 return vaddr;
1136 }
1137
1138 static void *reloc_iomap(struct drm_i915_gem_object *obj,
1139 struct i915_execbuffer *eb,
1140 unsigned long page)
1141 {
1142 struct reloc_cache *cache = &eb->reloc_cache;
1143 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1144 unsigned long offset;
1145 void *vaddr;
1146
1147 if (cache->vaddr) {
1148 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1149 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
1150 } else {
1151 struct i915_vma *vma;
1152 int err;
1153
1154 if (i915_gem_object_is_tiled(obj))
1155 return ERR_PTR(-EINVAL);
1156
1157 if (use_cpu_reloc(cache, obj))
1158 return NULL;
1159
1160 err = i915_gem_object_set_to_gtt_domain(obj, true);
1161 if (err)
1162 return ERR_PTR(err);
1163
1164 vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
1165 PIN_MAPPABLE |
1166 PIN_NONBLOCK /* NOWARN */ |
1167 PIN_NOEVICT);
1168 if (vma == ERR_PTR(-EDEADLK))
1169 return vma;
1170
1171 if (IS_ERR(vma)) {
1172 memset(&cache->node, 0, sizeof(cache->node));
1173 mutex_lock(&ggtt->vm.mutex);
1174 err = drm_mm_insert_node_in_range
1175 (&ggtt->vm.mm, &cache->node,
1176 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
1177 0, ggtt->mappable_end,
1178 DRM_MM_INSERT_LOW);
1179 mutex_unlock(&ggtt->vm.mutex);
1180 if (err) /* no inactive aperture space, use cpu reloc */
1181 return NULL;
1182 } else {
1183 cache->node.start = vma->node.start;
1184 cache->node.mm = (void *)vma;
1185 }
1186 }
1187
1188 offset = cache->node.start;
1189 if (drm_mm_node_allocated(&cache->node)) {
1190 ggtt->vm.insert_page(&ggtt->vm,
1191 i915_gem_object_get_dma_address(obj, page),
1192 offset, I915_CACHE_NONE, 0);
1193 } else {
1194 offset += page << PAGE_SHIFT;
1195 }
1196
1197 vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
1198 offset);
1199 cache->page = page;
1200 cache->vaddr = (unsigned long)vaddr;
1201
1202 return vaddr;
1203 }
1204
1205 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
1206 struct i915_execbuffer *eb,
1207 unsigned long page)
1208 {
1209 struct reloc_cache *cache = &eb->reloc_cache;
1210 void *vaddr;
1211
1212 if (cache->page == page) {
1213 vaddr = unmask_page(cache->vaddr);
1214 } else {
1215 vaddr = NULL;
1216 if ((cache->vaddr & KMAP) == 0)
1217 vaddr = reloc_iomap(obj, eb, page);
1218 if (!vaddr)
1219 vaddr = reloc_kmap(obj, cache, page);
1220 }
1221
1222 return vaddr;
1223 }
1224
1225 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
1226 {
1227 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
1228 if (flushes & CLFLUSH_BEFORE) {
1229 clflushopt(addr);
1230 mb();
1231 }
1232
1233 *addr = value;
1234
1235 /*
1236 * Writes to the same cacheline are serialised by the CPU
1237 * (including clflush). On the write path, we only require
1238 * that it hits memory in an orderly fashion and place
1239 * mb barriers at the start and end of the relocation phase
1240 * to ensure ordering of clflush wrt to the system.
1241 */
1242 if (flushes & CLFLUSH_AFTER)
1243 clflushopt(addr);
1244 } else
1245 *addr = value;
1246 }
1247
1248 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
1249 {
1250 struct drm_i915_gem_object *obj = vma->obj;
1251 int err;
1252
1253 assert_vma_held(vma);
1254
1255 if (obj->cache_dirty & ~obj->cache_coherent)
1256 i915_gem_clflush_object(obj, 0);
1257 obj->write_domain = 0;
1258
1259 err = i915_request_await_object(rq, vma->obj, true);
1260 if (err == 0)
1261 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1262
1263 return err;
1264 }
1265
1266 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1267 struct intel_engine_cs *engine,
1268 struct i915_vma *vma,
1269 unsigned int len)
1270 {
1271 struct reloc_cache *cache = &eb->reloc_cache;
1272 struct intel_gt_buffer_pool_node *pool = eb->reloc_pool;
1273 struct i915_request *rq;
1274 struct i915_vma *batch;
1275 u32 *cmd;
1276 int err;
1277
1278 if (!pool) {
1279 pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
1280 if (IS_ERR(pool))
1281 return PTR_ERR(pool);
1282 }
1283 eb->reloc_pool = NULL;
1284
1285 err = i915_gem_object_lock(pool->obj, &eb->ww);
1286 if (err)
1287 goto err_pool;
1288
1289 cmd = i915_gem_object_pin_map(pool->obj,
1290 cache->has_llc ?
1291 I915_MAP_FORCE_WB :
1292 I915_MAP_FORCE_WC);
1293 if (IS_ERR(cmd)) {
1294 err = PTR_ERR(cmd);
1295 goto err_pool;
1296 }
1297
1298 memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
1299
1300 batch = i915_vma_instance(pool->obj, vma->vm, NULL);
1301 if (IS_ERR(batch)) {
1302 err = PTR_ERR(batch);
1303 goto err_unmap;
1304 }
1305
1306 err = i915_vma_pin_ww(batch, &eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK);
1307 if (err)
1308 goto err_unmap;
1309
1310 if (engine == eb->context->engine) {
1311 rq = i915_request_create(eb->context);
1312 } else {
1313 struct intel_context *ce = eb->reloc_context;
1314
1315 if (!ce) {
1316 ce = intel_context_create(engine);
1317 if (IS_ERR(ce)) {
1318 err = PTR_ERR(ce);
1319 goto err_unpin;
1320 }
1321
1322 i915_vm_put(ce->vm);
1323 ce->vm = i915_vm_get(eb->context->vm);
1324 eb->reloc_context = ce;
1325 }
1326
1327 err = intel_context_pin_ww(ce, &eb->ww);
1328 if (err)
1329 goto err_unpin;
1330
1331 rq = i915_request_create(ce);
1332 intel_context_unpin(ce);
1333 }
1334 if (IS_ERR(rq)) {
1335 err = PTR_ERR(rq);
1336 goto err_unpin;
1337 }
1338
1339 err = intel_gt_buffer_pool_mark_active(pool, rq);
1340 if (err)
1341 goto err_request;
1342
1343 err = reloc_move_to_gpu(rq, vma);
1344 if (err)
1345 goto err_request;
1346
1347 err = eb->engine->emit_bb_start(rq,
1348 batch->node.start, PAGE_SIZE,
1349 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
1350 if (err)
1351 goto skip_request;
1352
1353 assert_vma_held(batch);
1354 err = i915_request_await_object(rq, batch->obj, false);
1355 if (err == 0)
1356 err = i915_vma_move_to_active(batch, rq, 0);
1357 if (err)
1358 goto skip_request;
1359
1360 rq->batch = batch;
1361 i915_vma_unpin(batch);
1362
1363 cache->rq = rq;
1364 cache->rq_cmd = cmd;
1365 cache->rq_size = 0;
1366 cache->pool = pool;
1367
1368 /* Return with batch mapping (cmd) still pinned */
1369 return 0;
1370
1371 skip_request:
1372 i915_request_set_error_once(rq, err);
1373 err_request:
1374 i915_request_add(rq);
1375 err_unpin:
1376 i915_vma_unpin(batch);
1377 err_unmap:
1378 i915_gem_object_unpin_map(pool->obj);
1379 err_pool:
1380 eb->reloc_pool = pool;
1381 return err;
1382 }
1383
1384 static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
1385 {
1386 return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
1387 }
1388
1389 static u32 *reloc_gpu(struct i915_execbuffer *eb,
1390 struct i915_vma *vma,
1391 unsigned int len)
1392 {
1393 struct reloc_cache *cache = &eb->reloc_cache;
1394 u32 *cmd;
1395
1396 if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
1397 reloc_gpu_flush(eb, cache);
1398
1399 if (unlikely(!cache->rq)) {
1400 int err;
1401 struct intel_engine_cs *engine = eb->engine;
1402
1403 /* If we need to copy for the cmdparser, we will stall anyway */
1404 if (eb_use_cmdparser(eb))
1405 return ERR_PTR(-EWOULDBLOCK);
1406
1407 if (!reloc_can_use_engine(engine)) {
1408 engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
1409 if (!engine)
1410 return ERR_PTR(-ENODEV);
1411 }
1412
1413 err = __reloc_gpu_alloc(eb, engine, vma, len);
1414 if (unlikely(err))
1415 return ERR_PTR(err);
1416 }
1417
1418 cmd = cache->rq_cmd + cache->rq_size;
1419 cache->rq_size += len;
1420
1421 return cmd;
1422 }
1423
1424 static inline bool use_reloc_gpu(struct i915_vma *vma)
1425 {
1426 if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
1427 return true;
1428
1429 if (DBG_FORCE_RELOC)
1430 return false;
1431
1432 return !dma_resv_test_signaled_rcu(vma->resv, true);
1433 }
1434
1435 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
1436 {
1437 struct page *page;
1438 unsigned long addr;
1439
1440 GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
1441
1442 page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
1443 addr = PFN_PHYS(page_to_pfn(page));
1444 GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */
1445
1446 return addr + offset_in_page(offset);
1447 }
1448
1449 static int __reloc_entry_gpu(struct i915_execbuffer *eb,
1450 struct i915_vma *vma,
1451 u64 offset,
1452 u64 target_addr)
1453 {
1454 const unsigned int gen = eb->reloc_cache.gen;
1455 unsigned int len;
1456 u32 *batch;
1457 u64 addr;
1458
1459 if (gen >= 8)
1460 len = offset & 7 ? 8 : 5;
1461 else if (gen >= 4)
1462 len = 4;
1463 else
1464 len = 3;
1465
1466 batch = reloc_gpu(eb, vma, len);
1467 if (batch == ERR_PTR(-EDEADLK))
1468 return -EDEADLK;
1469 else if (IS_ERR(batch))
1470 return false;
1471
1472 addr = gen8_canonical_addr(vma->node.start + offset);
1473 if (gen >= 8) {
1474 if (offset & 7) {
1475 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1476 *batch++ = lower_32_bits(addr);
1477 *batch++ = upper_32_bits(addr);
1478 *batch++ = lower_32_bits(target_addr);
1479
1480 addr = gen8_canonical_addr(addr + 4);
1481
1482 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1483 *batch++ = lower_32_bits(addr);
1484 *batch++ = upper_32_bits(addr);
1485 *batch++ = upper_32_bits(target_addr);
1486 } else {
1487 *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1488 *batch++ = lower_32_bits(addr);
1489 *batch++ = upper_32_bits(addr);
1490 *batch++ = lower_32_bits(target_addr);
1491 *batch++ = upper_32_bits(target_addr);
1492 }
1493 } else if (gen >= 6) {
1494 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1495 *batch++ = 0;
1496 *batch++ = addr;
1497 *batch++ = target_addr;
1498 } else if (IS_I965G(eb->i915)) {
1499 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1500 *batch++ = 0;
1501 *batch++ = vma_phys_addr(vma, offset);
1502 *batch++ = target_addr;
1503 } else if (gen >= 4) {
1504 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1505 *batch++ = 0;
1506 *batch++ = addr;
1507 *batch++ = target_addr;
1508 } else if (gen >= 3 &&
1509 !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
1510 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1511 *batch++ = addr;
1512 *batch++ = target_addr;
1513 } else {
1514 *batch++ = MI_STORE_DWORD_IMM;
1515 *batch++ = vma_phys_addr(vma, offset);
1516 *batch++ = target_addr;
1517 }
1518
1519 return true;
1520 }
1521
1522 static int reloc_entry_gpu(struct i915_execbuffer *eb,
1523 struct i915_vma *vma,
1524 u64 offset,
1525 u64 target_addr)
1526 {
1527 if (eb->reloc_cache.vaddr)
1528 return false;
1529
1530 if (!use_reloc_gpu(vma))
1531 return false;
1532
1533 return __reloc_entry_gpu(eb, vma, offset, target_addr);
1534 }
1535
1536 static u64
1537 relocate_entry(struct i915_vma *vma,
1538 const struct drm_i915_gem_relocation_entry *reloc,
1539 struct i915_execbuffer *eb,
1540 const struct i915_vma *target)
1541 {
1542 u64 target_addr = relocation_target(reloc, target);
1543 u64 offset = reloc->offset;
1544 int reloc_gpu = reloc_entry_gpu(eb, vma, offset, target_addr);
1545
1546 if (reloc_gpu < 0)
1547 return reloc_gpu;
1548
1549 if (!reloc_gpu) {
1550 bool wide = eb->reloc_cache.use_64bit_reloc;
1551 void *vaddr;
1552
1553 repeat:
1554 vaddr = reloc_vaddr(vma->obj, eb,
1555 offset >> PAGE_SHIFT);
1556 if (IS_ERR(vaddr))
1557 return PTR_ERR(vaddr);
1558
1559 GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
1560 clflush_write32(vaddr + offset_in_page(offset),
1561 lower_32_bits(target_addr),
1562 eb->reloc_cache.vaddr);
1563
1564 if (wide) {
1565 offset += sizeof(u32);
1566 target_addr >>= 32;
1567 wide = false;
1568 goto repeat;
1569 }
1570 }
1571
1572 return target->node.start | UPDATE;
1573 }
1574
1575 static u64
1576 eb_relocate_entry(struct i915_execbuffer *eb,
1577 struct eb_vma *ev,
1578 const struct drm_i915_gem_relocation_entry *reloc)
1579 {
1580 struct drm_i915_private *i915 = eb->i915;
1581 struct eb_vma *target;
1582 int err;
1583
1584 /* we already hold a reference to all valid objects */
1585 target = eb_get_vma(eb, reloc->target_handle);
1586 if (unlikely(!target))
1587 return -ENOENT;
1588
1589 /* Validate that the target is in a valid r/w GPU domain */
1590 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1591 drm_dbg(&i915->drm, "reloc with multiple write domains: "
1592 "target %d offset %d "
1593 "read %08x write %08x",
1594 reloc->target_handle,
1595 (int) reloc->offset,
1596 reloc->read_domains,
1597 reloc->write_domain);
1598 return -EINVAL;
1599 }
1600 if (unlikely((reloc->write_domain | reloc->read_domains)
1601 & ~I915_GEM_GPU_DOMAINS)) {
1602 drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
1603 "target %d offset %d "
1604 "read %08x write %08x",
1605 reloc->target_handle,
1606 (int) reloc->offset,
1607 reloc->read_domains,
1608 reloc->write_domain);
1609 return -EINVAL;
1610 }
1611
1612 if (reloc->write_domain) {
1613 target->flags |= EXEC_OBJECT_WRITE;
1614
1615 /*
1616 * Sandybridge PPGTT errata: We need a global gtt mapping
1617 * for MI and pipe_control writes because the gpu doesn't
1618 * properly redirect them through the ppgtt for non_secure
1619 * batchbuffers.
1620 */
1621 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1622 IS_GEN(eb->i915, 6)) {
1623 err = i915_vma_bind(target->vma,
1624 target->vma->obj->cache_level,
1625 PIN_GLOBAL, NULL);
1626 if (err)
1627 return err;
1628 }
1629 }
1630
1631 /*
1632 * If the relocation already has the right value in it, no
1633 * more work needs to be done.
1634 */
1635 if (!DBG_FORCE_RELOC &&
1636 gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
1637 return 0;
1638
1639 /* Check that the relocation address is valid... */
1640 if (unlikely(reloc->offset >
1641 ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1642 drm_dbg(&i915->drm, "Relocation beyond object bounds: "
1643 "target %d offset %d size %d.\n",
1644 reloc->target_handle,
1645 (int)reloc->offset,
1646 (int)ev->vma->size);
1647 return -EINVAL;
1648 }
1649 if (unlikely(reloc->offset & 3)) {
1650 drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
1651 "target %d offset %d.\n",
1652 reloc->target_handle,
1653 (int)reloc->offset);
1654 return -EINVAL;
1655 }
1656
1657 /*
1658 * If we write into the object, we need to force the synchronisation
1659 * barrier, either with an asynchronous clflush or if we executed the
1660 * patching using the GPU (though that should be serialised by the
1661 * timeline). To be completely sure, and since we are required to
1662 * do relocations we are already stalling, disable the user's opt
1663 * out of our synchronisation.
1664 */
1665 ev->flags &= ~EXEC_OBJECT_ASYNC;
1666
1667 /* and update the user's relocation entry */
1668 return relocate_entry(ev->vma, reloc, eb, target->vma);
1669 }
1670
1671 static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
1672 {
1673 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1674 struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
1675 const struct drm_i915_gem_exec_object2 *entry = ev->exec;
1676 struct drm_i915_gem_relocation_entry __user *urelocs =
1677 u64_to_user_ptr(entry->relocs_ptr);
1678 unsigned long remain = entry->relocation_count;
1679
1680 if (unlikely(remain > N_RELOC(ULONG_MAX)))
1681 return -EINVAL;
1682
1683 /*
1684 * We must check that the entire relocation array is safe
1685 * to read. However, if the array is not writable the user loses
1686 * the updated relocation values.
1687 */
1688 if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
1689 return -EFAULT;
1690
1691 do {
1692 struct drm_i915_gem_relocation_entry *r = stack;
1693 unsigned int count =
1694 min_t(unsigned long, remain, ARRAY_SIZE(stack));
1695 unsigned int copied;
1696
1697 /*
1698 * This is the fast path and we cannot handle a pagefault
1699 * whilst holding the struct mutex lest the user pass in the
1700 * relocations contained within a mmaped bo. In such a case
1701 * the page fault handler would call i915_gem_fault() and
1702 * we would try to acquire the struct mutex again. Obviously
1703 * this is bad and so lockdep complains vehemently.
1704 */
1705 pagefault_disable();
1706 copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
1707 pagefault_enable();
1708 if (unlikely(copied)) {
1709 remain = -EFAULT;
1710 goto out;
1711 }
1712
1713 remain -= count;
1714 do {
1715 u64 offset = eb_relocate_entry(eb, ev, r);
1716
1717 if (likely(offset == 0)) {
1718 } else if ((s64)offset < 0) {
1719 remain = (int)offset;
1720 goto out;
1721 } else {
1722 /*
1723 * Note that reporting an error now
1724 * leaves everything in an inconsistent
1725 * state as we have *already* changed
1726 * the relocation value inside the
1727 * object. As we have not changed the
1728 * reloc.presumed_offset or will not
1729 * change the execobject.offset, on the
1730 * call we may not rewrite the value
1731 * inside the object, leaving it
1732 * dangling and causing a GPU hang. Unless
1733 * userspace dynamically rebuilds the
1734 * relocations on each execbuf rather than
1735 * presume a static tree.
1736 *
1737 * We did previously check if the relocations
1738 * were writable (access_ok), an error now
1739 * would be a strange race with mprotect,
1740 * having already demonstrated that we
1741 * can read from this userspace address.
1742 */
1743 offset = gen8_canonical_addr(offset & ~UPDATE);
1744 __put_user(offset,
1745 &urelocs[r - stack].presumed_offset);
1746 }
1747 } while (r++, --count);
1748 urelocs += ARRAY_SIZE(stack);
1749 } while (remain);
1750 out:
1751 reloc_cache_reset(&eb->reloc_cache, eb);
1752 return remain;
1753 }
1754
1755 static int
1756 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
1757 {
1758 const struct drm_i915_gem_exec_object2 *entry = ev->exec;
1759 struct drm_i915_gem_relocation_entry *relocs =
1760 u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1761 unsigned int i;
1762 int err;
1763
1764 for (i = 0; i < entry->relocation_count; i++) {
1765 u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);
1766
1767 if ((s64)offset < 0) {
1768 err = (int)offset;
1769 goto err;
1770 }
1771 }
1772 err = 0;
1773 err:
1774 reloc_cache_reset(&eb->reloc_cache, eb);
1775 return err;
1776 }
1777
1778 static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
1779 {
1780 const char __user *addr, *end;
1781 unsigned long size;
1782 char __maybe_unused c;
1783
1784 size = entry->relocation_count;
1785 if (size == 0)
1786 return 0;
1787
1788 if (size > N_RELOC(ULONG_MAX))
1789 return -EINVAL;
1790
1791 addr = u64_to_user_ptr(entry->relocs_ptr);
1792 size *= sizeof(struct drm_i915_gem_relocation_entry);
1793 if (!access_ok(addr, size))
1794 return -EFAULT;
1795
1796 end = addr + size;
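/*
 * Probe one byte per page (plus the final byte) so that the whole range
 * is faulted in and readable before the caller retries the
 * pagefault-disabled fast path.
 */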
1797 for (; addr < end; addr += PAGE_SIZE) {
1798 int err = __get_user(c, addr);
1799 if (err)
1800 return err;
1801 }
1802 return __get_user(c, end - 1);
1803 }
1804
1805 static int eb_copy_relocations(const struct i915_execbuffer *eb)
1806 {
1807 struct drm_i915_gem_relocation_entry *relocs;
1808 const unsigned int count = eb->buffer_count;
1809 unsigned int i;
1810 int err;
1811
1812 for (i = 0; i < count; i++) {
1813 const unsigned int nreloc = eb->exec[i].relocation_count;
1814 struct drm_i915_gem_relocation_entry __user *urelocs;
1815 unsigned long size;
1816 unsigned long copied;
1817
1818 if (nreloc == 0)
1819 continue;
1820
1821 err = check_relocations(&eb->exec[i]);
1822 if (err)
1823 goto err;
1824
1825 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
1826 size = nreloc * sizeof(*relocs);
1827
1828 relocs = kvmalloc_array(size, 1, GFP_KERNEL);
1829 if (!relocs) {
1830 err = -ENOMEM;
1831 goto err;
1832 }
1833
1834 /* copy_from_user is limited to < 4GiB */
1835 copied = 0;
1836 do {
1837 unsigned int len =
1838 min_t(u64, BIT_ULL(31), size - copied);
1839
1840 if (__copy_from_user((char *)relocs + copied,
1841 (char __user *)urelocs + copied,
1842 len))
1843 goto end;
1844
1845 copied += len;
1846 } while (copied < size);
1847
1848 /*
1849 * As we do not update the known relocation offsets after
1850 * relocating (due to the complexities in lock handling),
1851 * we need to mark them as invalid now so that we force the
1852 * relocation processing next time. Just in case the target
1853 * object is evicted and then rebound into its old
1854 * presumed_offset before the next execbuffer - if that
1855 * happened we would make the mistake of assuming that the
1856 * relocations were valid.
1857 */
1858 if (!user_access_begin(urelocs, size))
1859 goto end;
1860
1861 for (copied = 0; copied < nreloc; copied++)
1862 unsafe_put_user(-1,
1863 &urelocs[copied].presumed_offset,
1864 end_user);
1865 user_access_end();
1866
1867 eb->exec[i].relocs_ptr = (uintptr_t)relocs;
1868 }
1869
1870 return 0;
1871
1872 end_user:
1873 user_access_end();
1874 end:
1875 kvfree(relocs);
1876 err = -EFAULT;
1877 err:
1878 while (i--) {
1879 relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1880 if (eb->exec[i].relocation_count)
1881 kvfree(relocs);
1882 }
1883 return err;
1884 }
1885
1886 static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1887 {
1888 const unsigned int count = eb->buffer_count;
1889 unsigned int i;
1890
1891 for (i = 0; i < count; i++) {
1892 int err;
1893
1894 err = check_relocations(&eb->exec[i]);
1895 if (err)
1896 return err;
1897 }
1898
1899 return 0;
1900 }
1901
1902 static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
1903 struct i915_request *rq)
1904 {
1905 bool have_copy = false;
1906 struct eb_vma *ev;
1907 int err = 0;
1908
1909 repeat:
1910 if (signal_pending(current)) {
1911 err = -ERESTARTSYS;
1912 goto out;
1913 }
1914
1915 /* We may process another execbuffer during the unlock... */
1916 eb_release_vmas(eb, false);
1917 i915_gem_ww_ctx_fini(&eb->ww);
1918
1919 if (rq) {
1920 /* nonblocking is always false */
1921 if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
1922 MAX_SCHEDULE_TIMEOUT) < 0) {
1923 i915_request_put(rq);
1924 rq = NULL;
1925
1926 err = -EINTR;
1927 goto err_relock;
1928 }
1929
1930 i915_request_put(rq);
1931 rq = NULL;
1932 }
1933
1934 /*
1935 * We take 3 passes through the slow path.
1936 *
1937 * 1 - we try to just prefault all the user relocation entries and
1938 * then attempt to reuse the atomic pagefault disabled fast path again.
1939 *
1940 * 2 - we copy the user entries to a local buffer here outside of the
1941 * lock and allow ourselves to wait upon any rendering before
1942 * applying the relocations.
1943 *
1944 * 3 - we already have a local copy of the relocation entries, but
1945 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
1946 */
1947 if (!err) {
1948 err = eb_prefault_relocations(eb);
1949 } else if (!have_copy) {
1950 err = eb_copy_relocations(eb);
1951 have_copy = err == 0;
1952 } else {
1953 cond_resched();
1954 err = 0;
1955 }
1956
1957 if (!err)
1958 flush_workqueue(eb->i915->mm.userptr_wq);
1959
1960 err_relock:
1961 i915_gem_ww_ctx_init(&eb->ww, true);
1962 if (err)
1963 goto out;
1964
1965 /* reacquire the objects */
1966 repeat_validate:
1967 rq = eb_pin_engine(eb, false);
1968 if (IS_ERR(rq)) {
1969 err = PTR_ERR(rq);
1970 rq = NULL;
1971 goto err;
1972 }
1973
1974 /* We didn't throttle, should be NULL */
1975 GEM_WARN_ON(rq);
1976
1977 err = eb_validate_vmas(eb);
1978 if (err)
1979 goto err;
1980
1981 GEM_BUG_ON(!eb->batch);
1982
1983 list_for_each_entry(ev, &eb->relocs, reloc_link) {
1984 if (!have_copy) {
1985 pagefault_disable();
1986 err = eb_relocate_vma(eb, ev);
1987 pagefault_enable();
1988 if (err)
1989 break;
1990 } else {
1991 err = eb_relocate_vma_slow(eb, ev);
1992 if (err)
1993 break;
1994 }
1995 }
1996
1997 if (err == -EDEADLK)
1998 goto err;
1999
2000 if (err && !have_copy)
2001 goto repeat;
2002
2003 if (err)
2004 goto err;
2005
2006 /* as last step, parse the command buffer */
2007 err = eb_parse(eb);
2008 if (err)
2009 goto err;
2010
2011 /*
2012 * Leave the user relocations as they are; this is the painfully slow path,
2013 * and we want to avoid the complication of dropping the lock whilst
2014 * having buffers reserved in the aperture and so causing spurious
2015 * ENOSPC for random operations.
2016 */
2017
2018 err:
2019 if (err == -EDEADLK) {
2020 eb_release_vmas(eb, false);
2021 err = i915_gem_ww_ctx_backoff(&eb->ww);
2022 if (!err)
2023 goto repeat_validate;
2024 }
2025
2026 if (err == -EAGAIN)
2027 goto repeat;
2028
2029 out:
2030 if (have_copy) {
2031 const unsigned int count = eb->buffer_count;
2032 unsigned int i;
2033
2034 for (i = 0; i < count; i++) {
2035 const struct drm_i915_gem_exec_object2 *entry =
2036 &eb->exec[i];
2037 struct drm_i915_gem_relocation_entry *relocs;
2038
2039 if (!entry->relocation_count)
2040 continue;
2041
2042 relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
2043 kvfree(relocs);
2044 }
2045 }
2046
2047 if (rq)
2048 i915_request_put(rq);
2049
2050 return err;
2051 }
2052
2053 static int eb_relocate_parse(struct i915_execbuffer *eb)
2054 {
2055 int err;
2056 struct i915_request *rq = NULL;
2057 bool throttle = true;
2058
2059 retry:
2060 rq = eb_pin_engine(eb, throttle);
2061 if (IS_ERR(rq)) {
2062 err = PTR_ERR(rq);
2063 rq = NULL;
2064 if (err != -EDEADLK)
2065 return err;
2066
2067 goto err;
2068 }
2069
2070 if (rq) {
2071 bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
2072
2073 /* Need to drop all locks now for throttling, take slowpath */
2074 err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0);
2075 if (err == -ETIME) {
2076 if (nonblock) {
2077 err = -EWOULDBLOCK;
2078 i915_request_put(rq);
2079 goto err;
2080 }
2081 goto slow;
2082 }
2083 i915_request_put(rq);
2084 rq = NULL;
2085 }
2086
2087 /* only throttle once, even if we didn't need to throttle */
2088 throttle = false;
2089
2090 err = eb_validate_vmas(eb);
2091 if (err == -EAGAIN)
2092 goto slow;
2093 else if (err)
2094 goto err;
2095
2096 /* The objects are in their final locations, apply the relocations. */
2097 if (eb->args->flags & __EXEC_HAS_RELOC) {
2098 struct eb_vma *ev;
2099
2100 list_for_each_entry(ev, &eb->relocs, reloc_link) {
2101 err = eb_relocate_vma(eb, ev);
2102 if (err)
2103 break;
2104 }
2105
2106 if (err == -EDEADLK)
2107 goto err;
2108 else if (err)
2109 goto slow;
2110 }
2111
2112 if (!err)
2113 err = eb_parse(eb);
2114
2115 err:
2116 if (err == -EDEADLK) {
2117 eb_release_vmas(eb, false);
2118 err = i915_gem_ww_ctx_backoff(&eb->ww);
2119 if (!err)
2120 goto retry;
2121 }
2122
2123 return err;
2124
2125 slow:
2126 err = eb_relocate_parse_slow(eb, rq);
2127 if (err)
2128 /*
2129 * If the user expects the execobject.offset and
2130 * reloc.presumed_offset to be an exact match,
2131 * as for using NO_RELOC, then we cannot update
2132 * the execobject.offset until we have completed
2133 * relocation.
2134 */
2135 eb->args->flags &= ~__EXEC_HAS_RELOC;
2136
2137 return err;
2138 }
2139
2140 static int eb_move_to_gpu(struct i915_execbuffer *eb)
2141 {
2142 const unsigned int count = eb->buffer_count;
2143 unsigned int i = count;
2144 int err = 0;
2145
2146 while (i--) {
2147 struct eb_vma *ev = &eb->vma[i];
2148 struct i915_vma *vma = ev->vma;
2149 unsigned int flags = ev->flags;
2150 struct drm_i915_gem_object *obj = vma->obj;
2151
2152 assert_vma_held(vma);
2153
2154 if (flags & EXEC_OBJECT_CAPTURE) {
2155 struct i915_capture_list *capture;
2156
2157 capture = kmalloc(sizeof(*capture), GFP_KERNEL);
2158 if (capture) {
2159 capture->next = eb->request->capture_list;
2160 capture->vma = vma;
2161 eb->request->capture_list = capture;
2162 }
2163 }
2164
2165 /*
2166 * If the GPU is not _reading_ through the CPU cache, we need
2167 * to make sure that any writes (both previous GPU writes from
2168 * before a change in snooping levels and normal CPU writes)
2169 * caught in that cache are flushed to main memory.
2170 *
2171 * We want to say
2172 * obj->cache_dirty &&
2173 * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
2174 * but gcc's optimiser doesn't handle that as well and emits
2175 * two jumps instead of one. Maybe one day...
2176 */
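/*
 * The masked form is equivalent because cache_dirty is a single bit and
 * I915_BO_CACHE_COHERENT_FOR_READ is the low bit of cache_coherent, so
 * cache_dirty & ~cache_coherent is non-zero exactly when the object is
 * dirty but not coherent for GPU reads.
 */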
2177 if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
2178 if (i915_gem_clflush_object(obj, 0))
2179 flags &= ~EXEC_OBJECT_ASYNC;
2180 }
2181
2182 if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
2183 err = i915_request_await_object
2184 (eb->request, obj, flags & EXEC_OBJECT_WRITE);
2185 }
2186
2187 if (err == 0)
2188 err = i915_vma_move_to_active(vma, eb->request, flags);
2189 }
2190
2191 if (unlikely(err))
2192 goto err_skip;
2193
2194 /* Unconditionally flush any chipset caches (for streaming writes). */
2195 intel_gt_chipset_flush(eb->engine->gt);
2196 return 0;
2197
2198 err_skip:
2199 i915_request_set_error_once(eb->request, err);
2200 return err;
2201 }
2202
2203 static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
2204 {
2205 if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
2206 return -EINVAL;
2207
2208 /* Kernel clipping was a DRI1 misfeature */
2209 if (!(exec->flags & (I915_EXEC_FENCE_ARRAY |
2210 I915_EXEC_USE_EXTENSIONS))) {
2211 if (exec->num_cliprects || exec->cliprects_ptr)
2212 return -EINVAL;
2213 }
2214
2215 if (exec->DR4 == 0xffffffff) {
2216 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
2217 exec->DR4 = 0;
2218 }
2219 if (exec->DR1 || exec->DR4)
2220 return -EINVAL;
2221
2222 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
2223 return -EINVAL;
2224
2225 return 0;
2226 }
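
/*
 * Illustrative userspace sketch (not part of this driver) of an execbuf2
 * request that passes the checks above: batch_start_offset and batch_len
 * 8-byte aligned, DR1/DR4 zero, and no cliprects unless FENCE_ARRAY or
 * USE_EXTENSIONS reuse those fields. Handles and sizes are placeholders.
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objects,
 *		.buffer_count = nobjects,          // batch object last by default
 *		.batch_start_offset = 0,           // must be 8-byte aligned
 *		.batch_len = batch_bytes,          // must be 8-byte aligned
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */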
2227
2228 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
2229 {
2230 u32 *cs;
2231 int i;
2232
2233 if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
2234 drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
2235 return -EINVAL;
2236 }
2237
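/*
 * One MI_LOAD_REGISTER_IMM header, four register/value pairs to clear the
 * GEN7_SO_WRITE_OFFSET registers, and a trailing MI_NOOP to keep the
 * emitted dword count even: 4 * 2 + 2 dwords in total.
 */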
2238 cs = intel_ring_begin(rq, 4 * 2 + 2);
2239 if (IS_ERR(cs))
2240 return PTR_ERR(cs);
2241
2242 *cs++ = MI_LOAD_REGISTER_IMM(4);
2243 for (i = 0; i < 4; i++) {
2244 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
2245 *cs++ = 0;
2246 }
2247 *cs++ = MI_NOOP;
2248 intel_ring_advance(rq, cs);
2249
2250 return 0;
2251 }
2252
2253 static struct i915_vma *
2254 shadow_batch_pin(struct i915_execbuffer *eb,
2255 struct drm_i915_gem_object *obj,
2256 struct i915_address_space *vm,
2257 unsigned int flags)
2258 {
2259 struct i915_vma *vma;
2260 int err;
2261
2262 vma = i915_vma_instance(obj, vm, NULL);
2263 if (IS_ERR(vma))
2264 return vma;
2265
2266 err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
2267 if (err)
2268 return ERR_PTR(err);
2269
2270 return vma;
2271 }
2272
2273 static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
2274 {
2275 /*
2276 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
2277 * batch" bit. Hence we need to pin secure batches into the global gtt.
2278 * hsw should have this fixed, but bdw mucks it up again. */
2279 if (eb->batch_flags & I915_DISPATCH_SECURE)
2280 return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);
2281
2282 return NULL;
2283 }
2284
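/*
 * eb_parse() runs the command parser over the user batch when required:
 * the batch is copied into a shadow buffer taken from the engine's buffer
 * pool, which is either mapped read-only in the ppGTT (to prevent
 * post-scan tampering) or, on platforms where the parser relies on the
 * GGTT, pinned into the GGTT with a trampoline appended. The shadow then
 * replaces the user batch for submission.
 */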
2285 static int eb_parse(struct i915_execbuffer *eb)
2286 {
2287 struct drm_i915_private *i915 = eb->i915;
2288 struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
2289 struct i915_vma *shadow, *trampoline, *batch;
2290 unsigned long len;
2291 int err;
2292
2293 if (!eb_use_cmdparser(eb)) {
2294 batch = eb_dispatch_secure(eb, eb->batch->vma);
2295 if (IS_ERR(batch))
2296 return PTR_ERR(batch);
2297
2298 goto secure_batch;
2299 }
2300
2301 len = eb->batch_len;
2302 if (!CMDPARSER_USES_GGTT(eb->i915)) {
2303 /*
2304 * ppGTT backed shadow buffers must be mapped RO, to prevent
2305 * post-scan tampering
2306 */
2307 if (!eb->context->vm->has_read_only) {
2308 drm_dbg(&i915->drm,
2309 "Cannot prevent post-scan tampering without RO capable vm\n");
2310 return -EINVAL;
2311 }
2312 } else {
2313 len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
2314 }
2315 if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */
2316 return -EINVAL;
2317
2318 if (!pool) {
2319 pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
2320 if (IS_ERR(pool))
2321 return PTR_ERR(pool);
2322 eb->batch_pool = pool;
2323 }
2324
2325 err = i915_gem_object_lock(pool->obj, &eb->ww);
2326 if (err)
2327 goto err;
2328
2329 shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
2330 if (IS_ERR(shadow)) {
2331 err = PTR_ERR(shadow);
2332 goto err;
2333 }
2334 i915_gem_object_set_readonly(shadow->obj);
2335 shadow->private = pool;
2336
2337 trampoline = NULL;
2338 if (CMDPARSER_USES_GGTT(eb->i915)) {
2339 trampoline = shadow;
2340
2341 shadow = shadow_batch_pin(eb, pool->obj,
2342 &eb->engine->gt->ggtt->vm,
2343 PIN_GLOBAL);
2344 if (IS_ERR(shadow)) {
2345 err = PTR_ERR(shadow);
2346 shadow = trampoline;
2347 goto err_shadow;
2348 }
2349 shadow->private = pool;
2350
2351 eb->batch_flags |= I915_DISPATCH_SECURE;
2352 }
2353
2354 batch = eb_dispatch_secure(eb, shadow);
2355 if (IS_ERR(batch)) {
2356 err = PTR_ERR(batch);
2357 goto err_trampoline;
2358 }
2359
2360 err = intel_engine_cmd_parser(eb->engine,
2361 eb->batch->vma,
2362 eb->batch_start_offset,
2363 eb->batch_len,
2364 shadow, trampoline);
2365 if (err)
2366 goto err_unpin_batch;
2367
2368 eb->batch = &eb->vma[eb->buffer_count++];
2369 eb->batch->vma = i915_vma_get(shadow);
2370 eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
2371
2372 eb->trampoline = trampoline;
2373 eb->batch_start_offset = 0;
2374
2375 secure_batch:
2376 if (batch) {
2377 eb->batch = &eb->vma[eb->buffer_count++];
2378 eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
2379 eb->batch->vma = i915_vma_get(batch);
2380 }
2381 return 0;
2382
2383 err_unpin_batch:
2384 if (batch)
2385 i915_vma_unpin(batch);
2386 err_trampoline:
2387 if (trampoline)
2388 i915_vma_unpin(trampoline);
2389 err_shadow:
2390 i915_vma_unpin(shadow);
2391 err:
2392 return err;
2393 }
2394
2395 static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
2396 {
2397 int err;
2398
2399 err = eb_move_to_gpu(eb);
2400 if (err)
2401 return err;
2402
2403 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
2404 err = i915_reset_gen7_sol_offsets(eb->request);
2405 if (err)
2406 return err;
2407 }
2408
2409 /*
2410 * After we completed waiting for other engines (using HW semaphores)
2411 * then we can signal that this request/batch is ready to run. This
2412 * allows us to determine if the batch is still waiting on the GPU
2413 * or actually running by checking the breadcrumb.
2414 */
2415 if (eb->engine->emit_init_breadcrumb) {
2416 err = eb->engine->emit_init_breadcrumb(eb->request);
2417 if (err)
2418 return err;
2419 }
2420
2421 err = eb->engine->emit_bb_start(eb->request,
2422 batch->node.start +
2423 eb->batch_start_offset,
2424 eb->batch_len,
2425 eb->batch_flags);
2426 if (err)
2427 return err;
2428
2429 if (eb->trampoline) {
2430 GEM_BUG_ON(eb->batch_start_offset);
2431 err = eb->engine->emit_bb_start(eb->request,
2432 eb->trampoline->node.start +
2433 eb->batch_len,
2434 0, 0);
2435 if (err)
2436 return err;
2437 }
2438
2439 if (intel_context_nopreempt(eb->context))
2440 __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
2441
2442 return 0;
2443 }
2444
2445 static int num_vcs_engines(const struct drm_i915_private *i915)
2446 {
2447 return hweight64(VDBOX_MASK(&i915->gt));
2448 }
2449
2450 /*
2451 * Find one BSD ring to dispatch the corresponding BSD command.
2452 * The engine index is returned.
2453 */
2454 static unsigned int
2455 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
2456 struct drm_file *file)
2457 {
2458 struct drm_i915_file_private *file_priv = file->driver_priv;
2459
2460 /* Check whether the file_priv has already selected one ring. */
2461 if ((int)file_priv->bsd_engine < 0)
2462 file_priv->bsd_engine =
2463 get_random_int() % num_vcs_engines(dev_priv);
2464
2465 return file_priv->bsd_engine;
2466 }
2467
2468 static const enum intel_engine_id user_ring_map[] = {
2469 [I915_EXEC_DEFAULT] = RCS0,
2470 [I915_EXEC_RENDER] = RCS0,
2471 [I915_EXEC_BLT] = BCS0,
2472 [I915_EXEC_BSD] = VCS0,
2473 [I915_EXEC_VEBOX] = VECS0
2474 };
2475
2476 static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce)
2477 {
2478 struct intel_ring *ring = ce->ring;
2479 struct intel_timeline *tl = ce->timeline;
2480 struct i915_request *rq;
2481
2482 /*
2483 * Completely unscientific finger-in-the-air estimates for suitable
2484 * maximum user request size (to avoid blocking) and then backoff.
2485 */
2486 if (intel_ring_update_space(ring) >= PAGE_SIZE)
2487 return NULL;
2488
2489 /*
2490 * Find a request that after waiting upon, there will be at least half
2491 * the ring available. The hysteresis allows us to compete for the
2492 * shared ring and should mean that we sleep less often prior to
2493 * claiming our resources, but not so long that the ring completely
2494 * drains before we can submit our next request.
2495 */
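/*
 * For example, with a 16 KiB ring we would wait on the oldest request
 * whose completion leaves more than 8 KiB of space free, rather than on
 * the most recent request.
 */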
2496 list_for_each_entry(rq, &tl->requests, link) {
2497 if (rq->ring != ring)
2498 continue;
2499
2500 if (__intel_ring_space(rq->postfix,
2501 ring->emit, ring->size) > ring->size / 2)
2502 break;
2503 }
2504 if (&rq->link == &tl->requests)
2505 return NULL; /* weird, we will check again later for real */
2506
2507 return i915_request_get(rq);
2508 }
2509
2510 static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
2511 {
2512 struct intel_context *ce = eb->context;
2513 struct intel_timeline *tl;
2514 struct i915_request *rq = NULL;
2515 int err;
2516
2517 GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
2518
2519 if (unlikely(intel_context_is_banned(ce)))
2520 return ERR_PTR(-EIO);
2521
2522 /*
2523 * Pinning the contexts may generate requests in order to acquire
2524 * GGTT space, so do this first before we reserve a seqno for
2525 * ourselves.
2526 */
2527 err = intel_context_pin_ww(ce, &eb->ww);
2528 if (err)
2529 return ERR_PTR(err);
2530
2531 /*
2532 * Take a local wakeref for preparing to dispatch the execbuf as
2533 * we expect to access the hardware fairly frequently in the
2534 * process, and require the engine to be kept awake between accesses.
2535 * Upon dispatch, we acquire another prolonged wakeref that we hold
2536 * until the timeline is idle, which in turn releases the wakeref
2537 * taken on the engine, and the parent device.
2538 */
2539 tl = intel_context_timeline_lock(ce);
2540 if (IS_ERR(tl)) {
2541 intel_context_unpin(ce);
2542 return ERR_CAST(tl);
2543 }
2544
2545 intel_context_enter(ce);
2546 if (throttle)
2547 rq = eb_throttle(eb, ce);
2548 intel_context_timeline_unlock(tl);
2549
2550 eb->args->flags |= __EXEC_ENGINE_PINNED;
2551 return rq;
2552 }
2553
2554 static void eb_unpin_engine(struct i915_execbuffer *eb)
2555 {
2556 struct intel_context *ce = eb->context;
2557 struct intel_timeline *tl = ce->timeline;
2558
2559 if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
2560 return;
2561
2562 eb->args->flags &= ~__EXEC_ENGINE_PINNED;
2563
2564 mutex_lock(&tl->mutex);
2565 intel_context_exit(ce);
2566 mutex_unlock(&tl->mutex);
2567
2568 intel_context_unpin(ce);
2569 }
2570
2571 static unsigned int
2572 eb_select_legacy_ring(struct i915_execbuffer *eb)
2573 {
2574 struct drm_i915_private *i915 = eb->i915;
2575 struct drm_i915_gem_execbuffer2 *args = eb->args;
2576 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
2577
2578 if (user_ring_id != I915_EXEC_BSD &&
2579 (args->flags & I915_EXEC_BSD_MASK)) {
2580 drm_dbg(&i915->drm,
2581 "execbuf with non bsd ring but with invalid "
2582 "bsd dispatch flags: %d\n", (int)(args->flags));
2583 return -1;
2584 }
2585
2586 if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
2587 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
2588
2589 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2590 bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file);
2591 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
2592 bsd_idx <= I915_EXEC_BSD_RING2) {
2593 bsd_idx >>= I915_EXEC_BSD_SHIFT;
2594 bsd_idx--;
2595 } else {
2596 drm_dbg(&i915->drm,
2597 "execbuf with unknown bsd ring: %u\n",
2598 bsd_idx);
2599 return -1;
2600 }
2601
2602 return _VCS(bsd_idx);
2603 }
2604
2605 if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
2606 drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
2607 user_ring_id);
2608 return -1;
2609 }
2610
2611 return user_ring_map[user_ring_id];
2612 }
2613
2614 static int
2615 eb_select_engine(struct i915_execbuffer *eb)
2616 {
2617 struct intel_context *ce;
2618 unsigned int idx;
2619 int err;
2620
2621 if (i915_gem_context_user_engines(eb->gem_context))
2622 idx = eb->args->flags & I915_EXEC_RING_MASK;
2623 else
2624 idx = eb_select_legacy_ring(eb);
2625
2626 ce = i915_gem_context_get_engine(eb->gem_context, idx);
2627 if (IS_ERR(ce))
2628 return PTR_ERR(ce);
2629
2630 intel_gt_pm_get(ce->engine->gt);
2631
2632 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
2633 err = intel_context_alloc_state(ce);
2634 if (err)
2635 goto err;
2636 }
2637
2638 /*
2639 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2640 * EIO if the GPU is already wedged.
2641 */
2642 err = intel_gt_terminally_wedged(ce->engine->gt);
2643 if (err)
2644 goto err;
2645
2646 eb->context = ce;
2647 eb->engine = ce->engine;
2648
2649 /*
2650 * Make sure engine pool stays alive even if we call intel_context_put
2651 * during ww handling. The pool is destroyed when last pm reference
2652 * is dropped, which breaks our -EDEADLK handling.
2653 */
2654 return err;
2655
2656 err:
2657 intel_gt_pm_put(ce->engine->gt);
2658 intel_context_put(ce);
2659 return err;
2660 }
2661
2662 static void
2663 eb_put_engine(struct i915_execbuffer *eb)
2664 {
2665 intel_gt_pm_put(eb->engine->gt);
2666 intel_context_put(eb->context);
2667 }
2668
2669 static void
2670 __free_fence_array(struct eb_fence *fences, unsigned int n)
2671 {
2672 while (n--) {
2673 drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
2674 dma_fence_put(fences[n].dma_fence);
2675 kfree(fences[n].chain_fence);
2676 }
2677 kvfree(fences);
2678 }
2679
2680 static int
2681 add_timeline_fence_array(struct i915_execbuffer *eb,
2682 const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences)
2683 {
2684 struct drm_i915_gem_exec_fence __user *user_fences;
2685 u64 __user *user_values;
2686 struct eb_fence *f;
2687 u64 nfences;
2688 int err = 0;
2689
2690 nfences = timeline_fences->fence_count;
2691 if (!nfences)
2692 return 0;
2693
2694 /* Check multiplication overflow for access_ok() and kvmalloc_array() */
2695 BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
2696 if (nfences > min_t(unsigned long,
2697 ULONG_MAX / sizeof(*user_fences),
2698 SIZE_MAX / sizeof(*f)) - eb->num_fences)
2699 return -EINVAL;
2700
2701 user_fences = u64_to_user_ptr(timeline_fences->handles_ptr);
2702 if (!access_ok(user_fences, nfences * sizeof(*user_fences)))
2703 return -EFAULT;
2704
2705 user_values = u64_to_user_ptr(timeline_fences->values_ptr);
2706 if (!access_ok(user_values, nfences * sizeof(*user_values)))
2707 return -EFAULT;
2708
2709 f = krealloc(eb->fences,
2710 (eb->num_fences + nfences) * sizeof(*f),
2711 __GFP_NOWARN | GFP_KERNEL);
2712 if (!f)
2713 return -ENOMEM;
2714
2715 eb->fences = f;
2716 f += eb->num_fences;
2717
2718 BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
2719 ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
2720
2721 while (nfences--) {
2722 struct drm_i915_gem_exec_fence user_fence;
2723 struct drm_syncobj *syncobj;
2724 struct dma_fence *fence = NULL;
2725 u64 point;
2726
2727 if (__copy_from_user(&user_fence,
2728 user_fences++,
2729 sizeof(user_fence)))
2730 return -EFAULT;
2731
2732 if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
2733 return -EINVAL;
2734
2735 if (__get_user(point, user_values++))
2736 return -EFAULT;
2737
2738 syncobj = drm_syncobj_find(eb->file, user_fence.handle);
2739 if (!syncobj) {
2740 DRM_DEBUG("Invalid syncobj handle provided\n");
2741 return -ENOENT;
2742 }
2743
2744 fence = drm_syncobj_fence_get(syncobj);
2745
2746 if (!fence && user_fence.flags &&
2747 !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
2748 DRM_DEBUG("Syncobj handle has no fence\n");
2749 drm_syncobj_put(syncobj);
2750 return -EINVAL;
2751 }
2752
2753 if (fence)
2754 err = dma_fence_chain_find_seqno(&fence, point);
2755
2756 if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
2757 DRM_DEBUG("Syncobj handle missing requested point %llu\n", point);
2758 dma_fence_put(fence);
2759 drm_syncobj_put(syncobj);
2760 return err;
2761 }
2762
2763 /*
2764 * A point might have been signaled already and
2765 * garbage collected from the timeline. In this case
2766 * just ignore the point and carry on.
2767 */
2768 if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
2769 drm_syncobj_put(syncobj);
2770 continue;
2771 }
2772
2773 /*
2774 * For timeline syncobjs we need to preallocate chains for
2775 * later signaling.
2776 */
2777 if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) {
2778 /*
2779 * Waiting and signaling the same point (when point !=
2780 * 0) would break the timeline.
2781 */
2782 if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
2783 DRM_DEBUG("Trying to wait & signal the same timeline point.\n");
2784 dma_fence_put(fence);
2785 drm_syncobj_put(syncobj);
2786 return -EINVAL;
2787 }
2788
2789 f->chain_fence =
2790 kmalloc(sizeof(*f->chain_fence),
2791 GFP_KERNEL);
2792 if (!f->chain_fence) {
2793 drm_syncobj_put(syncobj);
2794 dma_fence_put(fence);
2795 return -ENOMEM;
2796 }
2797 } else {
2798 f->chain_fence = NULL;
2799 }
2800
2801 f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
2802 f->dma_fence = fence;
2803 f->value = point;
2804 f++;
2805 eb->num_fences++;
2806 }
2807
2808 return 0;
2809 }
2810
2811 static int add_fence_array(struct i915_execbuffer *eb)
2812 {
2813 struct drm_i915_gem_execbuffer2 *args = eb->args;
2814 struct drm_i915_gem_exec_fence __user *user;
2815 unsigned long num_fences = args->num_cliprects;
2816 struct eb_fence *f;
2817
2818 if (!(args->flags & I915_EXEC_FENCE_ARRAY))
2819 return 0;
2820
2821 if (!num_fences)
2822 return 0;
2823
2824 /* Check multiplication overflow for access_ok() and kvmalloc_array() */
2825 BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
2826 if (num_fences > min_t(unsigned long,
2827 ULONG_MAX / sizeof(*user),
2828 SIZE_MAX / sizeof(*f) - eb->num_fences))
2829 return -EINVAL;
2830
2831 user = u64_to_user_ptr(args->cliprects_ptr);
2832 if (!access_ok(user, num_fences * sizeof(*user)))
2833 return -EFAULT;
2834
2835 f = krealloc(eb->fences,
2836 (eb->num_fences + num_fences) * sizeof(*f),
2837 __GFP_NOWARN | GFP_KERNEL);
2838 if (!f)
2839 return -ENOMEM;
2840
2841 eb->fences = f;
2842 f += eb->num_fences;
2843 while (num_fences--) {
2844 struct drm_i915_gem_exec_fence user_fence;
2845 struct drm_syncobj *syncobj;
2846 struct dma_fence *fence = NULL;
2847
2848 if (__copy_from_user(&user_fence, user++, sizeof(user_fence)))
2849 return -EFAULT;
2850
2851 if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
2852 return -EINVAL;
2853
2854 syncobj = drm_syncobj_find(eb->file, user_fence.handle);
2855 if (!syncobj) {
2856 DRM_DEBUG("Invalid syncobj handle provided\n");
2857 return -ENOENT;
2858 }
2859
2860 if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
2861 fence = drm_syncobj_fence_get(syncobj);
2862 if (!fence) {
2863 DRM_DEBUG("Syncobj handle has no fence\n");
2864 drm_syncobj_put(syncobj);
2865 return -EINVAL;
2866 }
2867 }
2868
2869 BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
2870 ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
2871
2872 f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
2873 f->dma_fence = fence;
2874 f->value = 0;
2875 f->chain_fence = NULL;
2876 f++;
2877 eb->num_fences++;
2878 }
2879
2880 return 0;
2881 }
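
/*
 * Illustrative userspace sketch (not part of this driver): with
 * I915_EXEC_FENCE_ARRAY, num_cliprects/cliprects_ptr are reused to pass an
 * array of syncobj handles to wait on and/or signal. Handle values are
 * placeholders.
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *	execbuf.flags |= I915_EXEC_FENCE_ARRAY;
 *	execbuf.cliprects_ptr = (uintptr_t)fences;
 *	execbuf.num_cliprects = 2;
 */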
2882
2883 static void put_fence_array(struct eb_fence *fences, int num_fences)
2884 {
2885 if (fences)
2886 __free_fence_array(fences, num_fences);
2887 }
2888
2889 static int
2890 await_fence_array(struct i915_execbuffer *eb)
2891 {
2892 unsigned int n;
2893 int err;
2894
2895 for (n = 0; n < eb->num_fences; n++) {
2896 struct drm_syncobj *syncobj;
2897 unsigned int flags;
2898
2899 syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
2900
2901 if (!eb->fences[n].dma_fence)
2902 continue;
2903
2904 err = i915_request_await_dma_fence(eb->request,
2905 eb->fences[n].dma_fence);
2906 if (err < 0)
2907 return err;
2908 }
2909
2910 return 0;
2911 }
2912
2913 static void signal_fence_array(const struct i915_execbuffer *eb)
2914 {
2915 struct dma_fence * const fence = &eb->request->fence;
2916 unsigned int n;
2917
2918 for (n = 0; n < eb->num_fences; n++) {
2919 struct drm_syncobj *syncobj;
2920 unsigned int flags;
2921
2922 syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
2923 if (!(flags & I915_EXEC_FENCE_SIGNAL))
2924 continue;
2925
2926 if (eb->fences[n].chain_fence) {
2927 drm_syncobj_add_point(syncobj,
2928 eb->fences[n].chain_fence,
2929 fence,
2930 eb->fences[n].value);
2931 /*
2932 * The chain's ownership is transferred to the
2933 * timeline.
2934 */
2935 eb->fences[n].chain_fence = NULL;
2936 } else {
2937 drm_syncobj_replace_fence(syncobj, fence);
2938 }
2939 }
2940 }
2941
2942 static int
2943 parse_timeline_fences(struct i915_user_extension __user *ext, void *data)
2944 {
2945 struct i915_execbuffer *eb = data;
2946 struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
2947
2948 if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences)))
2949 return -EFAULT;
2950
2951 return add_timeline_fence_array(eb, &timeline_fences);
2952 }
2953
2954 static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
2955 {
2956 struct i915_request *rq, *rn;
2957
2958 list_for_each_entry_safe(rq, rn, &tl->requests, link)
2959 if (rq == end || !i915_request_retire(rq))
2960 break;
2961 }
2962
2963 static int eb_request_add(struct i915_execbuffer *eb, int err)
2964 {
2965 struct i915_request *rq = eb->request;
2966 struct intel_timeline * const tl = i915_request_timeline(rq);
2967 struct i915_sched_attr attr = {};
2968 struct i915_request *prev;
2969
2970 lockdep_assert_held(&tl->mutex);
2971 lockdep_unpin_lock(&tl->mutex, rq->cookie);
2972
2973 trace_i915_request_add(rq);
2974
2975 prev = __i915_request_commit(rq);
2976
2977 /* Check that the context wasn't destroyed before submission */
2978 if (likely(!intel_context_is_closed(eb->context))) {
2979 attr = eb->gem_context->sched;
2980 } else {
2981 /* Serialise with context_close via the add_to_timeline */
2982 i915_request_set_error_once(rq, -ENOENT);
2983 __i915_request_skip(rq);
2984 err = -ENOENT; /* override any transient errors */
2985 }
2986
2987 __i915_request_queue(rq, &attr);
2988
2989 /* Try to clean up the client's timeline after submitting the request */
2990 if (prev)
2991 retire_requests(tl, prev);
2992
2993 mutex_unlock(&tl->mutex);
2994
2995 return err;
2996 }
2997
2998 static const i915_user_extension_fn execbuf_extensions[] = {
2999 [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
3000 };
3001
3002 static int
3003 parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
3004 struct i915_execbuffer *eb)
3005 {
3006 if (!(args->flags & I915_EXEC_USE_EXTENSIONS))
3007 return 0;
3008
3009 /* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot
3010 * have another flag also using it at the same time.
3011 */
3012 if (eb->args->flags & I915_EXEC_FENCE_ARRAY)
3013 return -EINVAL;
3014
3015 if (args->num_cliprects != 0)
3016 return -EINVAL;
3017
3018 return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr),
3019 execbuf_extensions,
3020 ARRAY_SIZE(execbuf_extensions),
3021 eb);
3022 }
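
/*
 * Illustrative userspace sketch (not part of this driver): with
 * I915_EXEC_USE_EXTENSIONS, cliprects_ptr instead points at a chain of
 * struct i915_user_extension nodes, e.g. the timeline-fences extension
 * parsed above. The fence handle and point below are placeholders.
 *
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *		.fence_count = 1,
 *		.handles_ptr = (uintptr_t)&fence,   // struct drm_i915_gem_exec_fence
 *		.values_ptr = (uintptr_t)&point,    // u64 timeline point
 *	};
 *	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 *	execbuf.num_cliprects = 0;
 *	execbuf.cliprects_ptr = (uintptr_t)&ext;
 */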
3023
3024 static int
3025 i915_gem_do_execbuffer(struct drm_device *dev,
3026 struct drm_file *file,
3027 struct drm_i915_gem_execbuffer2 *args,
3028 struct drm_i915_gem_exec_object2 *exec)
3029 {
3030 struct drm_i915_private *i915 = to_i915(dev);
3031 struct i915_execbuffer eb;
3032 struct dma_fence *in_fence = NULL;
3033 struct sync_file *out_fence = NULL;
3034 struct i915_vma *batch;
3035 int out_fence_fd = -1;
3036 int err;
3037
3038 BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
3039 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
3040 ~__EXEC_OBJECT_UNKNOWN_FLAGS);
3041
3042 eb.i915 = i915;
3043 eb.file = file;
3044 eb.args = args;
3045 if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
3046 args->flags |= __EXEC_HAS_RELOC;
3047
3048 eb.exec = exec;
3049 eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
3050 eb.vma[0].vma = NULL;
3051 eb.reloc_pool = eb.batch_pool = NULL;
3052 eb.reloc_context = NULL;
3053
3054 eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
3055 reloc_cache_init(&eb.reloc_cache, eb.i915);
3056
3057 eb.buffer_count = args->buffer_count;
3058 eb.batch_start_offset = args->batch_start_offset;
3059 eb.batch_len = args->batch_len;
3060 eb.trampoline = NULL;
3061
3062 eb.fences = NULL;
3063 eb.num_fences = 0;
3064
3065 eb.batch_flags = 0;
3066 if (args->flags & I915_EXEC_SECURE) {
3067 if (INTEL_GEN(i915) >= 11)
3068 return -ENODEV;
3069
3070 /* Return -EPERM to trigger fallback code on old binaries. */
3071 if (!HAS_SECURE_BATCHES(i915))
3072 return -EPERM;
3073
3074 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
3075 return -EPERM;
3076
3077 eb.batch_flags |= I915_DISPATCH_SECURE;
3078 }
3079 if (args->flags & I915_EXEC_IS_PINNED)
3080 eb.batch_flags |= I915_DISPATCH_PINNED;
3081
3082 err = parse_execbuf2_extensions(args, &eb);
3083 if (err)
3084 goto err_ext;
3085
3086 err = add_fence_array(&eb);
3087 if (err)
3088 goto err_ext;
3089
3090 #define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
3091 if (args->flags & IN_FENCES) {
3092 if ((args->flags & IN_FENCES) == IN_FENCES)
3093 return -EINVAL;
3094
3095 in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
3096 if (!in_fence) {
3097 err = -EINVAL;
3098 goto err_ext;
3099 }
3100 }
3101 #undef IN_FENCES
3102
3103 if (args->flags & I915_EXEC_FENCE_OUT) {
3104 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3105 if (out_fence_fd < 0) {
3106 err = out_fence_fd;
3107 goto err_in_fence;
3108 }
3109 }
3110
3111 err = eb_create(&eb);
3112 if (err)
3113 goto err_out_fence;
3114
3115 GEM_BUG_ON(!eb.lut_size);
3116
3117 err = eb_select_context(&eb);
3118 if (unlikely(err))
3119 goto err_destroy;
3120
3121 err = eb_select_engine(&eb);
3122 if (unlikely(err))
3123 goto err_context;
3124
3125 err = eb_lookup_vmas(&eb);
3126 if (err) {
3127 eb_release_vmas(&eb, true);
3128 goto err_engine;
3129 }
3130
3131 i915_gem_ww_ctx_init(&eb.ww, true);
3132
3133 err = eb_relocate_parse(&eb);
3134 if (err) {
3135 /*
3136 * If the user expects the execobject.offset and
3137 * reloc.presumed_offset to be an exact match,
3138 * as for using NO_RELOC, then we cannot update
3139 * the execobject.offset until we have completed
3140 * relocation.
3141 */
3142 args->flags &= ~__EXEC_HAS_RELOC;
3143 goto err_vma;
3144 }
3145
3146 ww_acquire_done(&eb.ww.ctx);
3147
3148 batch = eb.batch->vma;
3149
3150 /* All GPU relocation batches must be submitted prior to the user rq */
3151 GEM_BUG_ON(eb.reloc_cache.rq);
3152
3153 /* Allocate a request for this batch buffer nice and early. */
3154 eb.request = i915_request_create(eb.context);
3155 if (IS_ERR(eb.request)) {
3156 err = PTR_ERR(eb.request);
3157 goto err_vma;
3158 }
3159
3160 if (in_fence) {
3161 if (args->flags & I915_EXEC_FENCE_SUBMIT)
3162 err = i915_request_await_execution(eb.request,
3163 in_fence,
3164 eb.engine->bond_execute);
3165 else
3166 err = i915_request_await_dma_fence(eb.request,
3167 in_fence);
3168 if (err < 0)
3169 goto err_request;
3170 }
3171
3172 if (eb.fences) {
3173 err = await_fence_array(&eb);
3174 if (err)
3175 goto err_request;
3176 }
3177
3178 if (out_fence_fd != -1) {
3179 out_fence = sync_file_create(&eb.request->fence);
3180 if (!out_fence) {
3181 err = -ENOMEM;
3182 goto err_request;
3183 }
3184 }
3185
3186 /*
3187 * Whilst this request exists, batch_obj will be on the
3188 * active_list, and so will hold the active reference. Only when this
3189 * request is retired will the batch_obj be moved onto the
3190 * inactive_list and lose its active reference. Hence we do not need
3191 * to explicitly hold another reference here.
3192 */
3193 eb.request->batch = batch;
3194 if (eb.batch_pool)
3195 intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request);
3196
3197 trace_i915_request_queue(eb.request, eb.batch_flags);
3198 err = eb_submit(&eb, batch);
3199 err_request:
3200 i915_request_get(eb.request);
3201 err = eb_request_add(&eb, err);
3202
3203 if (eb.fences)
3204 signal_fence_array(&eb);
3205
3206 if (out_fence) {
3207 if (err == 0) {
3208 fd_install(out_fence_fd, out_fence->file);
3209 args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
3210 args->rsvd2 |= (u64)out_fence_fd << 32;
3211 out_fence_fd = -1;
3212 } else {
3213 fput(out_fence->file);
3214 }
3215 }
3216 i915_request_put(eb.request);
3217
3218 err_vma:
3219 eb_release_vmas(&eb, true);
3220 if (eb.trampoline)
3221 i915_vma_unpin(eb.trampoline);
3222 WARN_ON(err == -EDEADLK);
3223 i915_gem_ww_ctx_fini(&eb.ww);
3224
3225 if (eb.batch_pool)
3226 intel_gt_buffer_pool_put(eb.batch_pool);
3227 if (eb.reloc_pool)
3228 intel_gt_buffer_pool_put(eb.reloc_pool);
3229 if (eb.reloc_context)
3230 intel_context_put(eb.reloc_context);
3231 err_engine:
3232 eb_put_engine(&eb);
3233 err_context:
3234 i915_gem_context_put(eb.gem_context);
3235 err_destroy:
3236 eb_destroy(&eb);
3237 err_out_fence:
3238 if (out_fence_fd != -1)
3239 put_unused_fd(out_fence_fd);
3240 err_in_fence:
3241 dma_fence_put(in_fence);
3242 err_ext:
3243 put_fence_array(eb.fences, eb.num_fences);
3244 return err;
3245 }
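
/*
 * Illustrative userspace sketch (not part of this driver) of the sync_file
 * plumbing above: with I915_EXEC_FENCE_IN (or I915_EXEC_FENCE_SUBMIT) the
 * input fence fd goes in the lower 32 bits of rsvd2, and with
 * I915_EXEC_FENCE_OUT the new fence fd comes back in the upper 32 bits
 * (via the _WR flavour of the ioctl). Fd values are placeholders.
 *
 *	execbuf.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
 *	execbuf.rsvd2 = in_fence_fd;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf) == 0) {
 *		int out_fence_fd = execbuf.rsvd2 >> 32;
 *		// ... wait on or pass along out_fence_fd, then close() it
 *	}
 */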
3246
3247 static size_t eb_element_size(void)
3248 {
3249 return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
3250 }
3251
3252 static bool check_buffer_count(size_t count)
3253 {
3254 const size_t sz = eb_element_size();
3255
3256 /*
3257 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
3258 * array size (see eb_create()). Otherwise, we can accept an array as
3259 * large as can be addressed (though use large arrays at your peril)!
3260 */
3261
3262 return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
3263 }
3264
3265 /*
3266 * Legacy execbuffer just creates an exec2 list from the original exec object
3267 * list array and passes it to the real function.
3268 */
3269 int
3270 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
3271 struct drm_file *file)
3272 {
3273 struct drm_i915_private *i915 = to_i915(dev);
3274 struct drm_i915_gem_execbuffer *args = data;
3275 struct drm_i915_gem_execbuffer2 exec2;
3276 struct drm_i915_gem_exec_object *exec_list = NULL;
3277 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3278 const size_t count = args->buffer_count;
3279 unsigned int i;
3280 int err;
3281
3282 if (!check_buffer_count(count)) {
3283 drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
3284 return -EINVAL;
3285 }
3286
3287 exec2.buffers_ptr = args->buffers_ptr;
3288 exec2.buffer_count = args->buffer_count;
3289 exec2.batch_start_offset = args->batch_start_offset;
3290 exec2.batch_len = args->batch_len;
3291 exec2.DR1 = args->DR1;
3292 exec2.DR4 = args->DR4;
3293 exec2.num_cliprects = args->num_cliprects;
3294 exec2.cliprects_ptr = args->cliprects_ptr;
3295 exec2.flags = I915_EXEC_RENDER;
3296 i915_execbuffer2_set_context_id(exec2, 0);
3297
3298 err = i915_gem_check_execbuffer(&exec2);
3299 if (err)
3300 return err;
3301
3302 /* Copy in the exec list from userland */
3303 exec_list = kvmalloc_array(count, sizeof(*exec_list),
3304 __GFP_NOWARN | GFP_KERNEL);
3305
3306 /* Allocate extra slots for use by the command parser */
3307 exec2_list = kvmalloc_array(count + 2, eb_element_size(),
3308 __GFP_NOWARN | GFP_KERNEL);
3309 if (exec_list == NULL || exec2_list == NULL) {
3310 drm_dbg(&i915->drm,
3311 "Failed to allocate exec list for %d buffers\n",
3312 args->buffer_count);
3313 kvfree(exec_list);
3314 kvfree(exec2_list);
3315 return -ENOMEM;
3316 }
3317 err = copy_from_user(exec_list,
3318 u64_to_user_ptr(args->buffers_ptr),
3319 sizeof(*exec_list) * count);
3320 if (err) {
3321 drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
3322 args->buffer_count, err);
3323 kvfree(exec_list);
3324 kvfree(exec2_list);
3325 return -EFAULT;
3326 }
3327
3328 for (i = 0; i < args->buffer_count; i++) {
3329 exec2_list[i].handle = exec_list[i].handle;
3330 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3331 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3332 exec2_list[i].alignment = exec_list[i].alignment;
3333 exec2_list[i].offset = exec_list[i].offset;
3334 if (INTEL_GEN(to_i915(dev)) < 4)
3335 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3336 else
3337 exec2_list[i].flags = 0;
3338 }
3339
3340 err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
3341 if (exec2.flags & __EXEC_HAS_RELOC) {
3342 struct drm_i915_gem_exec_object __user *user_exec_list =
3343 u64_to_user_ptr(args->buffers_ptr);
3344
3345 /* Copy the new buffer offsets back to the user's exec list. */
3346 for (i = 0; i < args->buffer_count; i++) {
3347 if (!(exec2_list[i].offset & UPDATE))
3348 continue;
3349
3350 exec2_list[i].offset =
3351 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
3352 exec2_list[i].offset &= PIN_OFFSET_MASK;
3353 if (__copy_to_user(&user_exec_list[i].offset,
3354 &exec2_list[i].offset,
3355 sizeof(user_exec_list[i].offset)))
3356 break;
3357 }
3358 }
3359
3360 kvfree(exec_list);
3361 kvfree(exec2_list);
3362 return err;
3363 }
3364
3365 int
3366 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
3367 struct drm_file *file)
3368 {
3369 struct drm_i915_private *i915 = to_i915(dev);
3370 struct drm_i915_gem_execbuffer2 *args = data;
3371 struct drm_i915_gem_exec_object2 *exec2_list;
3372 const size_t count = args->buffer_count;
3373 int err;
3374
3375 if (!check_buffer_count(count)) {
3376 drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
3377 return -EINVAL;
3378 }
3379
3380 err = i915_gem_check_execbuffer(args);
3381 if (err)
3382 return err;
3383
3384 /* Allocate extra slots for use by the command parser */
3385 exec2_list = kvmalloc_array(count + 2, eb_element_size(),
3386 __GFP_NOWARN | GFP_KERNEL);
3387 if (exec2_list == NULL) {
3388 drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
3389 count);
3390 return -ENOMEM;
3391 }
3392 if (copy_from_user(exec2_list,
3393 u64_to_user_ptr(args->buffers_ptr),
3394 sizeof(*exec2_list) * count)) {
3395 drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
3396 kvfree(exec2_list);
3397 return -EFAULT;
3398 }
3399
3400 err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
3401
3402 /*
3403 * Now that we have begun execution of the batchbuffer, we ignore
3404 * any new error after this point. Also given that we have already
3405 * updated the associated relocations, we try to write out the current
3406 * object locations irrespective of any error.
3407 */
3408 if (args->flags & __EXEC_HAS_RELOC) {
3409 struct drm_i915_gem_exec_object2 __user *user_exec_list =
3410 u64_to_user_ptr(args->buffers_ptr);
3411 unsigned int i;
3412
3413 /* Copy the new buffer offsets back to the user's exec list. */
3414 /*
3415 * Note: count * sizeof(*user_exec_list) does not overflow,
3416 * because we checked 'count' in check_buffer_count().
3417 *
3418 * And this range already got effectively checked earlier
3419 * when we did the "copy_from_user()" above.
3420 */
3421 if (!user_write_access_begin(user_exec_list,
3422 count * sizeof(*user_exec_list)))
3423 goto end;
3424
3425 for (i = 0; i < args->buffer_count; i++) {
3426 if (!(exec2_list[i].offset & UPDATE))
3427 continue;
3428
3429 exec2_list[i].offset =
3430 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
3431 unsafe_put_user(exec2_list[i].offset,
3432 &user_exec_list[i].offset,
3433 end_user);
3434 }
3435 end_user:
3436 user_write_access_end();
3437 end:;
3438 }
3439
3440 args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
3441 kvfree(exec2_list);
3442 return err;
3443 }
3444
3445 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3446 #include "selftests/i915_gem_execbuffer.c"
3447 #endif
3448