1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include <xf86drm.h>
31
32 #include "anv_private.h"
33 #include "anv_measure.h"
34
35 #include "genxml/gen8_pack.h"
36 #include "genxml/genX_bits.h"
37 #include "perf/intel_perf.h"
38
39 #include "util/u_debug.h"
40 #include "util/perf/u_trace.h"
41
42 /** \file anv_batch_chain.c
43 *
44 * This file contains functions related to anv_cmd_buffer as a data
45 * structure. This involves everything required to create and destroy
46 * the actual batch buffers as well as link them together and handle
47 * relocations and surface state. It specifically does *not* contain any
48 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
49 */
50
51 /*-----------------------------------------------------------------------*
52 * Functions related to anv_reloc_list
53 *-----------------------------------------------------------------------*/
54
55 VkResult
56 anv_reloc_list_init(struct anv_reloc_list *list,
57 const VkAllocationCallbacks *alloc)
58 {
59 memset(list, 0, sizeof(*list));
60 return VK_SUCCESS;
61 }
62
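/* Deep-copy another relocation list: duplicates the relocs/reloc_bos arrays
 * and the dependency bitset so the clone can be modified independently.
 */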
63 static VkResult
64 anv_reloc_list_init_clone(struct anv_reloc_list *list,
65 const VkAllocationCallbacks *alloc,
66 const struct anv_reloc_list *other_list)
67 {
68 list->num_relocs = other_list->num_relocs;
69 list->array_length = other_list->array_length;
70
71 if (list->num_relocs > 0) {
72 list->relocs =
73 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
74 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
75 if (list->relocs == NULL)
76 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
77
78 list->reloc_bos =
79 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
80 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
81 if (list->reloc_bos == NULL) {
82 vk_free(alloc, list->relocs);
83 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
84 }
85
86 memcpy(list->relocs, other_list->relocs,
87 list->array_length * sizeof(*list->relocs));
88 memcpy(list->reloc_bos, other_list->reloc_bos,
89 list->array_length * sizeof(*list->reloc_bos));
90 } else {
91 list->relocs = NULL;
92 list->reloc_bos = NULL;
93 }
94
95 list->dep_words = other_list->dep_words;
96
97 if (list->dep_words > 0) {
98 list->deps =
99 vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
100 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
101 memcpy(list->deps, other_list->deps,
102 list->dep_words * sizeof(BITSET_WORD));
103 } else {
104 list->deps = NULL;
105 }
106
107 return VK_SUCCESS;
108 }
109
110 void
111 anv_reloc_list_finish(struct anv_reloc_list *list,
112 const VkAllocationCallbacks *alloc)
113 {
114 vk_free(alloc, list->relocs);
115 vk_free(alloc, list->reloc_bos);
116 vk_free(alloc, list->deps);
117 }
118
119 static VkResult
120 anv_reloc_list_grow(struct anv_reloc_list *list,
121 const VkAllocationCallbacks *alloc,
122 size_t num_additional_relocs)
123 {
124 if (list->num_relocs + num_additional_relocs <= list->array_length)
125 return VK_SUCCESS;
126
127 size_t new_length = MAX2(16, list->array_length * 2);
128 while (new_length < list->num_relocs + num_additional_relocs)
129 new_length *= 2;
130
131 struct drm_i915_gem_relocation_entry *new_relocs =
132 vk_realloc(alloc, list->relocs,
133 new_length * sizeof(*list->relocs), 8,
134 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
135 if (new_relocs == NULL)
136 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
137 list->relocs = new_relocs;
138
139 struct anv_bo **new_reloc_bos =
140 vk_realloc(alloc, list->reloc_bos,
141 new_length * sizeof(*list->reloc_bos), 8,
142 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
143 if (new_reloc_bos == NULL)
144 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
145 list->reloc_bos = new_reloc_bos;
146
147 list->array_length = new_length;
148
149 return VK_SUCCESS;
150 }
151
152 static VkResult
153 anv_reloc_list_grow_deps(struct anv_reloc_list *list,
154 const VkAllocationCallbacks *alloc,
155 uint32_t min_num_words)
156 {
157 if (min_num_words <= list->dep_words)
158 return VK_SUCCESS;
159
160 uint32_t new_length = MAX2(32, list->dep_words * 2);
161 while (new_length < min_num_words)
162 new_length *= 2;
163
164 BITSET_WORD *new_deps =
165 vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
166 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
167 if (new_deps == NULL)
168 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
169 list->deps = new_deps;
170
171 /* Zero out the new data */
172 memset(list->deps + list->dep_words, 0,
173 (new_length - list->dep_words) * sizeof(BITSET_WORD));
174 list->dep_words = new_length;
175
176 return VK_SUCCESS;
177 }
178
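/* Force a single, non-cached read of x so the compiler cannot re-load the
 * value (used below to snapshot a BO offset that may be updated elsewhere).
 */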
179 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
180
181 VkResult
182 anv_reloc_list_add_bo(struct anv_reloc_list *list,
183 const VkAllocationCallbacks *alloc,
184 struct anv_bo *target_bo)
185 {
186 assert(!target_bo->is_wrapper);
187 assert(anv_bo_is_pinned(target_bo));
188
189 uint32_t idx = target_bo->gem_handle;
190 VkResult result = anv_reloc_list_grow_deps(list, alloc,
191 (idx / BITSET_WORDBITS) + 1);
192 if (unlikely(result != VK_SUCCESS))
193 return result;
194
195 BITSET_SET(list->deps, idx);
196
197 return VK_SUCCESS;
198 }
199
200 VkResult
201 anv_reloc_list_add(struct anv_reloc_list *list,
202 const VkAllocationCallbacks *alloc,
203 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
204 uint64_t *address_u64_out)
205 {
206 struct drm_i915_gem_relocation_entry *entry;
207 int index;
208
209 struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
210 uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
211 if (address_u64_out)
212 *address_u64_out = target_bo_offset + delta;
213
214 assert(unwrapped_target_bo->gem_handle > 0);
215 assert(unwrapped_target_bo->refcount > 0);
216
217 if (anv_bo_is_pinned(unwrapped_target_bo))
218 return anv_reloc_list_add_bo(list, alloc, unwrapped_target_bo);
219
220 VkResult result = anv_reloc_list_grow(list, alloc, 1);
221 if (result != VK_SUCCESS)
222 return result;
223
224 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
225 index = list->num_relocs++;
226 list->reloc_bos[index] = target_bo;
227 entry = &list->relocs[index];
228 entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
229 entry->delta = delta;
230 entry->offset = offset;
231 entry->presumed_offset = target_bo_offset;
232 entry->read_domains = 0;
233 entry->write_domain = 0;
234 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
235
236 return VK_SUCCESS;
237 }
238
239 static void
240 anv_reloc_list_clear(struct anv_reloc_list *list)
241 {
242 list->num_relocs = 0;
243 if (list->dep_words > 0)
244 memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
245 }
246
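/* Append all relocations from 'other' to 'list', shifting each copied
 * relocation's batch offset by 'offset' and OR-ing in the dependency bitset.
 */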
247 static VkResult
248 anv_reloc_list_append(struct anv_reloc_list *list,
249 const VkAllocationCallbacks *alloc,
250 struct anv_reloc_list *other, uint32_t offset)
251 {
252 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
253 if (result != VK_SUCCESS)
254 return result;
255
256 if (other->num_relocs > 0) {
257 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
258 other->num_relocs * sizeof(other->relocs[0]));
259 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
260 other->num_relocs * sizeof(other->reloc_bos[0]));
261
262 for (uint32_t i = 0; i < other->num_relocs; i++)
263 list->relocs[i + list->num_relocs].offset += offset;
264
265 list->num_relocs += other->num_relocs;
266 }
267
268 anv_reloc_list_grow_deps(list, alloc, other->dep_words);
269 for (uint32_t w = 0; w < other->dep_words; w++)
270 list->deps[w] |= other->deps[w];
271
272 return VK_SUCCESS;
273 }
274
275 /*-----------------------------------------------------------------------*
276 * Functions related to anv_batch
277 *-----------------------------------------------------------------------*/
278
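/* Reserve space for num_dwords dwords in the batch, calling extend_cb to
 * grow or chain the batch if there is not enough room, and return a pointer
 * to the reserved space (or NULL on error).
 */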
279 void *
280 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
281 {
282 if (batch->next + num_dwords * 4 > batch->end) {
283 VkResult result = batch->extend_cb(batch, batch->user_data);
284 if (result != VK_SUCCESS) {
285 anv_batch_set_error(batch, result);
286 return NULL;
287 }
288 }
289
290 void *p = batch->next;
291
292 batch->next += num_dwords * 4;
293 assert(batch->next <= batch->end);
294
295 return p;
296 }
297
298 struct anv_address
299 anv_batch_address(struct anv_batch *batch, void *batch_location)
300 {
301 assert(batch->start <= batch_location);
302
303 /* Allow a jump at the current location of the batch. */
304 assert(batch->next >= batch_location);
305
306 return anv_address_add(batch->start_addr, batch_location - batch->start);
307 }
308
309 void
310 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
311 {
312 uint32_t size, offset;
313
314 size = other->next - other->start;
315 assert(size % 4 == 0);
316
317 if (batch->next + size > batch->end) {
318 VkResult result = batch->extend_cb(batch, batch->user_data);
319 if (result != VK_SUCCESS) {
320 anv_batch_set_error(batch, result);
321 return;
322 }
323 }
324
325 assert(batch->next + size <= batch->end);
326
327 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
328 memcpy(batch->next, other->start, size);
329
330 offset = batch->next - batch->start;
331 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
332 other->relocs, offset);
333 if (result != VK_SUCCESS) {
334 anv_batch_set_error(batch, result);
335 return;
336 }
337
338 batch->next += size;
339 }
340
341 /*-----------------------------------------------------------------------*
342 * Functions related to anv_batch_bo
343 *-----------------------------------------------------------------------*/
344
345 static VkResult
346 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
347 uint32_t size,
348 struct anv_batch_bo **bbo_out)
349 {
350 VkResult result;
351
352 struct anv_batch_bo *bbo = vk_zalloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
353 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
354 if (bbo == NULL)
355 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
356
357 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
358 size, &bbo->bo);
359 if (result != VK_SUCCESS)
360 goto fail_alloc;
361
362 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
363 if (result != VK_SUCCESS)
364 goto fail_bo_alloc;
365
366 *bbo_out = bbo;
367
368 return VK_SUCCESS;
369
370 fail_bo_alloc:
371 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
372 fail_alloc:
373 vk_free(&cmd_buffer->vk.pool->alloc, bbo);
374
375 return result;
376 }
377
378 static VkResult
379 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
380 const struct anv_batch_bo *other_bbo,
381 struct anv_batch_bo **bbo_out)
382 {
383 VkResult result;
384
385 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
386 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
387 if (bbo == NULL)
388 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
389
390 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
391 other_bbo->bo->size, &bbo->bo);
392 if (result != VK_SUCCESS)
393 goto fail_alloc;
394
395 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->vk.pool->alloc,
396 &other_bbo->relocs);
397 if (result != VK_SUCCESS)
398 goto fail_bo_alloc;
399
400 bbo->length = other_bbo->length;
401 memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
402 *bbo_out = bbo;
403
404 return VK_SUCCESS;
405
406 fail_bo_alloc:
407 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
408 fail_alloc:
409 vk_free(&cmd_buffer->vk.pool->alloc, bbo);
410
411 return result;
412 }
413
414 static void
415 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
416 size_t batch_padding)
417 {
418 anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
419 bbo->bo->map, bbo->bo->size - batch_padding);
420 batch->relocs = &bbo->relocs;
421 anv_reloc_list_clear(&bbo->relocs);
422 }
423
424 static void
425 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
426 size_t batch_padding)
427 {
428 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
429 batch->start = bbo->bo->map;
430 batch->next = bbo->bo->map + bbo->length;
431 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
432 batch->relocs = &bbo->relocs;
433 }
434
435 static void
436 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
437 {
438 assert(batch->start == bbo->bo->map);
439 bbo->length = batch->next - batch->start;
440 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
441 }
442
443 static VkResult
444 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
445 struct anv_batch *batch, size_t additional,
446 size_t batch_padding)
447 {
448 assert(batch->start == bbo->bo->map);
449 bbo->length = batch->next - batch->start;
450
451 size_t new_size = bbo->bo->size;
452 while (new_size <= bbo->length + additional + batch_padding)
453 new_size *= 2;
454
455 if (new_size == bbo->bo->size)
456 return VK_SUCCESS;
457
458 struct anv_bo *new_bo;
459 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
460 new_size, &new_bo);
461 if (result != VK_SUCCESS)
462 return result;
463
464 memcpy(new_bo->map, bbo->bo->map, bbo->length);
465
466 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
467
468 bbo->bo = new_bo;
469 anv_batch_bo_continue(bbo, batch, batch_padding);
470
471 return VK_SUCCESS;
472 }
473
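/* Point the MI_BATCH_BUFFER_START at the end of prev_bbo to next_bbo at
 * next_bbo_offset, either by fixing up its relocation entry (relocation
 * mode) or by writing the pinned address directly into the batch.
 */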
474 static void
475 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
476 struct anv_batch_bo *prev_bbo,
477 struct anv_batch_bo *next_bbo,
478 uint32_t next_bbo_offset)
479 {
480 const uint32_t bb_start_offset =
481 prev_bbo->length - GFX8_MI_BATCH_BUFFER_START_length * 4;
482 ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
483
484 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
485 assert(((*bb_start >> 29) & 0x07) == 0);
486 assert(((*bb_start >> 23) & 0x3f) == 49);
487
488 if (anv_use_relocations(cmd_buffer->device->physical)) {
489 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
490 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
491
492 prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
493 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
494
495 /* Use a bogus presumed offset to force a relocation */
496 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
497 } else {
498 assert(anv_bo_is_pinned(prev_bbo->bo));
499 assert(anv_bo_is_pinned(next_bbo->bo));
500
501 write_reloc(cmd_buffer->device,
502 prev_bbo->bo->map + bb_start_offset + 4,
503 next_bbo->bo->offset + next_bbo_offset, true);
504 }
505 }
506
507 static void
508 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
509 struct anv_cmd_buffer *cmd_buffer)
510 {
511 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
512 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
513 vk_free(&cmd_buffer->vk.pool->alloc, bbo);
514 }
515
516 static VkResult
517 anv_batch_bo_list_clone(const struct list_head *list,
518 struct anv_cmd_buffer *cmd_buffer,
519 struct list_head *new_list)
520 {
521 VkResult result = VK_SUCCESS;
522
523 list_inithead(new_list);
524
525 struct anv_batch_bo *prev_bbo = NULL;
526 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
527 struct anv_batch_bo *new_bbo = NULL;
528 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
529 if (result != VK_SUCCESS)
530 break;
531 list_addtail(&new_bbo->link, new_list);
532
533 if (prev_bbo)
534 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
535
536 prev_bbo = new_bbo;
537 }
538
539 if (result != VK_SUCCESS) {
540 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
541 list_del(&bbo->link);
542 anv_batch_bo_destroy(bbo, cmd_buffer);
543 }
544 }
545
546 return result;
547 }
548
549 /*-----------------------------------------------------------------------*
550 * Functions related to anv_batch_bo
551 *-----------------------------------------------------------------------*/
552
553 static struct anv_batch_bo *
554 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
555 {
556 return list_entry(cmd_buffer->batch_bos.prev, struct anv_batch_bo, link);
557 }
558
559 struct anv_address
560 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
561 {
562 struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
563 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
564 return (struct anv_address) {
565 .bo = pool->block_pool.bo,
566 .offset = bt_block->offset - pool->start_offset,
567 };
568 }
569
570 static void
571 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
572 struct anv_bo *bo, uint32_t offset)
573 {
574 /* In gfx8+ the address field grew to two dwords to accommodate 48 bit
575 * offsets. The high 16 bits are in the last dword, so we can use the gfx8
576 * version in either case, as long as we set the instruction length in the
577 * header accordingly. This means that we always emit three dwords here
578 * and all the padding and adjustment we do in this file works for all
579 * gens.
580 */
581
582 #define GFX7_MI_BATCH_BUFFER_START_length 2
583 #define GFX7_MI_BATCH_BUFFER_START_length_bias 2
584
585 const uint32_t gfx7_length =
586 GFX7_MI_BATCH_BUFFER_START_length - GFX7_MI_BATCH_BUFFER_START_length_bias;
587 const uint32_t gfx8_length =
588 GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;
589
590 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
591 bbs.DWordLength = cmd_buffer->device->info->ver < 8 ?
592 gfx7_length : gfx8_length;
593 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
594 bbs.AddressSpaceIndicator = ASI_PPGTT;
595 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
596 }
597 }
598
599 static void
600 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
601 struct anv_batch_bo *bbo)
602 {
603 struct anv_batch *batch = &cmd_buffer->batch;
604 struct anv_batch_bo *current_bbo =
605 anv_cmd_buffer_current_batch_bo(cmd_buffer);
606
607 /* We set the end of the batch a little short so we would be sure we
608 * have room for the chaining command. Since we're about to emit the
609 * chaining command, let's set it back where it should go.
610 */
611 batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
612 assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
613
614 emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
615
616 anv_batch_bo_finish(current_bbo, batch);
617 }
618
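/* At submit time, patch the space reserved at cmd_buffer_from->batch_end
 * with an MI_BATCH_BUFFER_START that jumps to the first batch BO of
 * cmd_buffer_to, chaining the two command buffers together.
 */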
619 static void
620 anv_cmd_buffer_record_chain_submit(struct anv_cmd_buffer *cmd_buffer_from,
621 struct anv_cmd_buffer *cmd_buffer_to)
622 {
623 assert(!anv_use_relocations(cmd_buffer_from->device->physical));
624
625 uint32_t *bb_start = cmd_buffer_from->batch_end;
626
627 struct anv_batch_bo *last_bbo =
628 list_last_entry(&cmd_buffer_from->batch_bos, struct anv_batch_bo, link);
629 struct anv_batch_bo *first_bbo =
630 list_first_entry(&cmd_buffer_to->batch_bos, struct anv_batch_bo, link);
631
632 struct GFX8_MI_BATCH_BUFFER_START gen_bb_start = {
633 __anv_cmd_header(GFX8_MI_BATCH_BUFFER_START),
634 .SecondLevelBatchBuffer = Firstlevelbatch,
635 .AddressSpaceIndicator = ASI_PPGTT,
636 .BatchBufferStartAddress = (struct anv_address) { first_bbo->bo, 0 },
637 };
638 struct anv_batch local_batch = {
639 .start = last_bbo->bo->map,
640 .end = last_bbo->bo->map + last_bbo->bo->size,
641 .relocs = &last_bbo->relocs,
642 .alloc = &cmd_buffer_from->vk.pool->alloc,
643 };
644
645 __anv_cmd_pack(GFX8_MI_BATCH_BUFFER_START)(&local_batch, bb_start, &gen_bb_start);
646
647 last_bbo->chained = true;
648 }
649
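/* At submit time, when this command buffer is not chained to another one,
 * overwrite the reserved space with MI_BATCH_BUFFER_END instead.
 */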
650 static void
651 anv_cmd_buffer_record_end_submit(struct anv_cmd_buffer *cmd_buffer)
652 {
653 assert(!anv_use_relocations(cmd_buffer->device->physical));
654
655 struct anv_batch_bo *last_bbo =
656 list_last_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
657 last_bbo->chained = false;
658
659 uint32_t *batch = cmd_buffer->batch_end;
660 anv_pack_struct(batch, GFX8_MI_BATCH_BUFFER_END,
661 __anv_cmd_header(GFX8_MI_BATCH_BUFFER_END));
662 }
663
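/* Batch extend_cb used when the device can chain batches: allocate a new
 * batch BO, emit an MI_BATCH_BUFFER_START in the current BO to jump to it,
 * and continue recording in the new BO.
 */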
664 static VkResult
665 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
666 {
667 struct anv_cmd_buffer *cmd_buffer = _data;
668 struct anv_batch_bo *new_bbo = NULL;
669 /* Cap reallocation to chunk. */
670 uint32_t alloc_size = MIN2(cmd_buffer->total_batch_size,
671 ANV_MAX_CMD_BUFFER_BATCH_SIZE);
672
673 VkResult result = anv_batch_bo_create(cmd_buffer, alloc_size, &new_bbo);
674 if (result != VK_SUCCESS)
675 return result;
676
677 cmd_buffer->total_batch_size += alloc_size;
678
679 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
680 if (seen_bbo == NULL) {
681 anv_batch_bo_destroy(new_bbo, cmd_buffer);
682 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
683 }
684 *seen_bbo = new_bbo;
685
686 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
687
688 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
689
690 anv_batch_bo_start(new_bbo, batch, GFX8_MI_BATCH_BUFFER_START_length * 4);
691
692 return VK_SUCCESS;
693 }
694
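/* Batch extend_cb used when chaining is not available: grow the current
 * batch BO in place by copying its contents into a larger BO.
 */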
695 static VkResult
696 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
697 {
698 struct anv_cmd_buffer *cmd_buffer = _data;
699 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
700
701 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
702 GFX8_MI_BATCH_BUFFER_START_length * 4);
703
704 return VK_SUCCESS;
705 }
706
707 /** Allocate a binding table
708 *
709 * This function allocates a binding table. This is a bit more complicated
710 * than one would think due to a combination of Vulkan driver design and some
711 * unfortunate hardware restrictions.
712 *
713 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
714 * the binding table pointer which means that all binding tables need to live
715 * in the bottom 64k of surface state base address. The way the GL driver has
716 * classically dealt with this restriction is to emit all surface states
717 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
718 * isn't really an option in Vulkan for a couple of reasons:
719 *
720 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
721 * to live in their own buffer and we have to be able to re-emit
722 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
723 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
724 * (it's not that hard to hit 64k of just binding tables), we allocate
725 * surface state objects up-front when VkImageView is created. In order
726 * for this to work, surface state objects need to be allocated from a
727 * global buffer.
728 *
729 * 2) We tried to design the surface state system in such a way that it's
730 * already ready for bindless texturing. The way bindless texturing works
731 * on our hardware is that you have a big pool of surface state objects
732 * (with its own state base address) and the bindless handles are simply
733 * offsets into that pool. With the architecture we chose, we already
734 * have that pool and it's exactly the same pool that we use for regular
735 * surface states so we should already be ready for bindless.
736 *
737 * 3) For render targets, we need to be able to fill out the surface states
738 * later in vkBeginRenderPass so that we can assign clear colors
739 * correctly. One way to do this would be to just create the surface
740 * state data and then repeatedly copy it into the surface state BO every
741 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
742 * rather annoying, and it's much simpler to just allocate them up-front
743 * and re-use them for the entire render pass.
744 *
745 * While none of these are technically blockers for emitting state on the fly
746 * like we do in GL, the ability to have a single surface state pool
747 * simplifies things greatly. Unfortunately, it comes at a cost...
748 *
749 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
750 * place the binding tables just anywhere in surface state base address.
751 * Because 64k isn't a whole lot of space, we can't simply restrict the
752 * surface state buffer to 64k; we have to be more clever. The solution we've
753 * chosen is to have a block pool with a maximum size of 2G that starts at
754 * zero and grows in both directions. All surface states are allocated from
755 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
756 * binding tables from the bottom of the pool (negative offsets). Every time
757 * we allocate a new binding table block, we set surface state base address to
758 * point to the bottom of the binding table block. This way all of the
759 * binding tables in the block are in the bottom 64k of surface state base
760 * address. When we fill out the binding table, we add the distance between
761 * the bottom of our binding table block and zero of the block pool to the
762 * surface state offsets so that they are correct relative to our new surface
763 * state base address at the bottom of the binding table block.
764 *
765 * \see adjust_relocations_from_state_pool()
766 * \see adjust_relocations_to_state_pool()
767 *
768 * \param[in] entries The number of surface state entries the binding
769 * table should be able to hold.
770 *
771 * \param[out] state_offset The offset from surface state base address
772 * where the surface states live. This must be
773 * added to the surface state offset when it is
774 * written into the binding table entry.
775 *
776 * \return An anv_state representing the binding table
777 */
778 struct anv_state
779 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
780 uint32_t entries, uint32_t *state_offset)
781 {
782 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
783
784 uint32_t bt_size = align(entries * 4, 32);
785
786 struct anv_state state = cmd_buffer->bt_next;
787 if (bt_size > state.alloc_size)
788 return (struct anv_state) { 0 };
789
790 state.alloc_size = bt_size;
791 cmd_buffer->bt_next.offset += bt_size;
792 cmd_buffer->bt_next.map += bt_size;
793 cmd_buffer->bt_next.alloc_size -= bt_size;
794
795 assert(bt_block->offset < 0);
796 *state_offset = -bt_block->offset;
797
798 return state;
799 }
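/* Rough sketch of how a caller might use the binding table allocator: the
 * allocation fails when the current block is exhausted, in which case the
 * caller grabs a new block and retries (the surrounding names and error
 * handling here are illustrative only, not code from this file; real callers
 * may also need to re-emit STATE_BASE_ADDRESS after switching blocks):
 *
 *    uint32_t state_offset;
 *    struct anv_state bt =
 *       anv_cmd_buffer_alloc_binding_table(cmd_buffer, entries, &state_offset);
 *    if (bt.map == NULL) {
 *       VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
 *       if (result != VK_SUCCESS)
 *          return result;
 *       bt = anv_cmd_buffer_alloc_binding_table(cmd_buffer, entries,
 *                                               &state_offset);
 *    }
 */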
800
801 struct anv_state
802 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
803 {
804 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
805 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
806 isl_dev->ss.size, isl_dev->ss.align);
807 }
808
809 struct anv_state
810 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
811 uint32_t size, uint32_t alignment)
812 {
813 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
814 size, alignment);
815 }
816
817 VkResult
818 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
819 {
820 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
821 if (bt_block == NULL) {
822 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
823 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
824 }
825
826 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
827
828 /* The bt_next state is a rolling state (we update it as we suballocate
829 * from it) which is relative to the start of the binding table block.
830 */
831 cmd_buffer->bt_next = *bt_block;
832 cmd_buffer->bt_next.offset = 0;
833
834 return VK_SUCCESS;
835 }
836
837 VkResult
838 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
839 {
840 struct anv_batch_bo *batch_bo = NULL;
841 VkResult result;
842
843 list_inithead(&cmd_buffer->batch_bos);
844
845 cmd_buffer->total_batch_size = ANV_MIN_CMD_BUFFER_BATCH_SIZE;
846
847 result = anv_batch_bo_create(cmd_buffer,
848 cmd_buffer->total_batch_size,
849 &batch_bo);
850 if (result != VK_SUCCESS)
851 return result;
852
853 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
854
855 cmd_buffer->batch.alloc = &cmd_buffer->vk.pool->alloc;
856 cmd_buffer->batch.user_data = cmd_buffer;
857
858 if (cmd_buffer->device->can_chain_batches) {
859 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
860 } else {
861 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
862 }
863
864 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
865 GFX8_MI_BATCH_BUFFER_START_length * 4);
866
867 int success = u_vector_init_pow2(&cmd_buffer->seen_bbos, 8,
868 sizeof(struct anv_bo *));
869 if (!success)
870 goto fail_batch_bo;
871
872 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
873
874 success = u_vector_init(&cmd_buffer->bt_block_states, 8,
875 sizeof(struct anv_state));
876 if (!success)
877 goto fail_seen_bbos;
878
879 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
880 &cmd_buffer->vk.pool->alloc);
881 if (result != VK_SUCCESS)
882 goto fail_bt_blocks;
883 cmd_buffer->last_ss_pool_center = 0;
884
885 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
886 if (result != VK_SUCCESS)
887 goto fail_bt_blocks;
888
889 return VK_SUCCESS;
890
891 fail_bt_blocks:
892 u_vector_finish(&cmd_buffer->bt_block_states);
893 fail_seen_bbos:
894 u_vector_finish(&cmd_buffer->seen_bbos);
895 fail_batch_bo:
896 anv_batch_bo_destroy(batch_bo, cmd_buffer);
897
898 return result;
899 }
900
901 void
902 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
903 {
904 struct anv_state *bt_block;
905 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
906 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
907 u_vector_finish(&cmd_buffer->bt_block_states);
908
909 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->vk.pool->alloc);
910
911 u_vector_finish(&cmd_buffer->seen_bbos);
912
913 /* Destroy all of the batch buffers */
914 list_for_each_entry_safe(struct anv_batch_bo, bbo,
915 &cmd_buffer->batch_bos, link) {
916 list_del(&bbo->link);
917 anv_batch_bo_destroy(bbo, cmd_buffer);
918 }
919 }
920
921 void
922 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
923 {
924 /* Delete all but the first batch bo */
925 assert(!list_is_empty(&cmd_buffer->batch_bos));
926 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
927 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
928 list_del(&bbo->link);
929 anv_batch_bo_destroy(bbo, cmd_buffer);
930 }
931 assert(!list_is_empty(&cmd_buffer->batch_bos));
932
933 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
934 &cmd_buffer->batch,
935 GFX8_MI_BATCH_BUFFER_START_length * 4);
936
937 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
938 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
939 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
940 }
941 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
942 cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
943 cmd_buffer->bt_next.offset = 0;
944
945 anv_reloc_list_clear(&cmd_buffer->surface_relocs);
946 cmd_buffer->last_ss_pool_center = 0;
947
948 /* Reset the list of seen buffers */
949 cmd_buffer->seen_bbos.head = 0;
950 cmd_buffer->seen_bbos.tail = 0;
951
952 struct anv_batch_bo *first_bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
953
954 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = first_bbo;
955
956
957 assert(!cmd_buffer->device->can_chain_batches ||
958 first_bbo->bo->size == ANV_MIN_CMD_BUFFER_BATCH_SIZE);
959 cmd_buffer->total_batch_size = first_bbo->bo->size;
960 }
961
962 void
963 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
964 {
965 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
966
967 if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
968 /* When we start a batch buffer, we subtract a certain amount of
969 * padding from the end to ensure that we always have room to emit a
970 * BATCH_BUFFER_START to chain to the next BO. We need to remove
971 * that padding before we end the batch; otherwise, we may end up
972 * with our BATCH_BUFFER_END in another BO.
973 */
974 cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
975 assert(cmd_buffer->batch.start == batch_bo->bo->map);
976 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
977
978 /* Save end instruction location to override it later. */
979 cmd_buffer->batch_end = cmd_buffer->batch.next;
980
981 /* If we can chain this command buffer to another one, leave some place
982 * for the jump instruction.
983 */
984 batch_bo->chained = anv_cmd_buffer_is_chainable(cmd_buffer);
985 if (batch_bo->chained)
986 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
987 else
988 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);
989
990 /* Round batch up to an even number of dwords. */
991 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
992 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
993
994 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
995 } else {
996 assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
997 /* If this is a secondary command buffer, we need to determine the
998 * mode in which it will be executed with vkCmdExecuteCommands. We
999 * determine this statically here so that this stays in sync with the
1000 * actual ExecuteCommands implementation.
1001 */
1002 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
1003 if (!cmd_buffer->device->can_chain_batches) {
1004 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
1005 } else if (cmd_buffer->device->physical->use_call_secondary) {
1006 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
1007 /* If the secondary command buffer begins & ends in the same BO and
1008 * its length is less than the length of CS prefetch, add some NOOPs
1009 * instructions so the last MI_BATCH_BUFFER_START is outside the CS
1010 * prefetch.
1011 */
1012 if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
1013 const struct intel_device_info *devinfo = cmd_buffer->device->info;
1014 const enum intel_engine_class engine_class = cmd_buffer->queue_family->engine_class;
1015 /* Careful to have everything in signed integer. */
1016 int32_t prefetch_len = devinfo->engine_class_prefetch[engine_class];
1017 int batch_len = cmd_buffer->batch.next - cmd_buffer->batch.start;
1018
1019 for (int32_t i = 0; i < (prefetch_len - batch_len); i += 4)
1020 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
1021 }
1022
1023 void *jump_addr =
1024 anv_batch_emitn(&cmd_buffer->batch,
1025 GFX8_MI_BATCH_BUFFER_START_length,
1026 GFX8_MI_BATCH_BUFFER_START,
1027 .AddressSpaceIndicator = ASI_PPGTT,
1028 .SecondLevelBatchBuffer = Firstlevelbatch) +
1029 (GFX8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
1030 cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
1031
1032 /* The emit above may have caused us to chain batch buffers which
1033 * would mean that batch_bo is no longer valid.
1034 */
1035 batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
1036 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
1037 (length < ANV_MIN_CMD_BUFFER_BATCH_SIZE / 2)) {
1038 /* If the secondary has exactly one batch buffer in its list *and*
1039 * that batch buffer is less than half of the maximum size, we're
1040 * probably better off simply copying it into our batch.
1041 */
1042 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
1043 } else if (!(cmd_buffer->usage_flags &
1044 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
1045 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
1046
1047 /* In order to chain, we need this command buffer to contain an
1048 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
1049 * It doesn't matter where it points now so long as it has a valid
1050 * relocation. We'll adjust it later as part of the chaining
1051 * process.
1052 *
1053 * We set the end of the batch a little short so we would be sure we
1054 * have room for the chaining command. Since we're about to emit the
1055 * chaining command, let's set it back where it should go.
1056 */
1057 cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
1058 assert(cmd_buffer->batch.start == batch_bo->bo->map);
1059 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
1060
1061 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
1062 assert(cmd_buffer->batch.start == batch_bo->bo->map);
1063 } else {
1064 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
1065 }
1066 }
1067
1068 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
1069 }
1070
1071 static VkResult
1072 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
1073 struct list_head *list)
1074 {
1075 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
1076 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
1077 if (bbo_ptr == NULL)
1078 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
1079
1080 *bbo_ptr = bbo;
1081 }
1082
1083 return VK_SUCCESS;
1084 }
1085
1086 void
1087 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1088 struct anv_cmd_buffer *secondary)
1089 {
1090 anv_measure_add_secondary(primary, secondary);
1091 switch (secondary->exec_mode) {
1092 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1093 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1094 break;
1095 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
1096 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
1097 unsigned length = secondary->batch.end - secondary->batch.start;
1098 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1099 GFX8_MI_BATCH_BUFFER_START_length * 4);
1100 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1101 break;
1102 }
1103 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1104 struct anv_batch_bo *first_bbo =
1105 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1106 struct anv_batch_bo *last_bbo =
1107 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1108
1109 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1110
1111 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1112 assert(primary->batch.start == this_bbo->bo->map);
1113 uint32_t offset = primary->batch.next - primary->batch.start;
1114
1115 /* Make the tail of the secondary point back to right after the
1116 * MI_BATCH_BUFFER_START in the primary batch.
1117 */
1118 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1119
1120 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1121 break;
1122 }
1123 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1124 struct list_head copy_list;
1125 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1126 secondary,
1127 &copy_list);
1128 if (result != VK_SUCCESS)
1129 return; /* FIXME */
1130
1131 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1132
1133 struct anv_batch_bo *first_bbo =
1134 list_first_entry(&copy_list, struct anv_batch_bo, link);
1135 struct anv_batch_bo *last_bbo =
1136 list_last_entry(&copy_list, struct anv_batch_bo, link);
1137
1138 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1139
1140 list_splicetail(&copy_list, &primary->batch_bos);
1141
1142 anv_batch_bo_continue(last_bbo, &primary->batch,
1143 GFX8_MI_BATCH_BUFFER_START_length * 4);
1144 break;
1145 }
1146 case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
1147 struct anv_batch_bo *first_bbo =
1148 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1149
1150 uint64_t *write_return_addr =
1151 anv_batch_emitn(&primary->batch,
1152 GFX8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1153 GFX8_MI_STORE_DATA_IMM,
1154 .Address = secondary->return_addr)
1155 + (GFX8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1156
1157 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1158
1159 *write_return_addr =
1160 anv_address_physical(anv_batch_address(&primary->batch,
1161 primary->batch.next));
1162
1163 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1164 break;
1165 }
1166 default:
1167 assert(!"Invalid execution mode");
1168 }
1169
1170 anv_reloc_list_append(&primary->surface_relocs, &primary->vk.pool->alloc,
1171 &secondary->surface_relocs, 0);
1172 }
1173
1174 struct anv_execbuf {
1175 struct drm_i915_gem_execbuffer2 execbuf;
1176
1177 struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
1178
1179 struct drm_i915_gem_exec_object2 * objects;
1180 uint32_t bo_count;
1181 struct anv_bo ** bos;
1182
1183 /* Allocated length of the 'objects' and 'bos' arrays */
1184 uint32_t array_length;
1185
1186 uint32_t syncobj_count;
1187 uint32_t syncobj_array_length;
1188 struct drm_i915_gem_exec_fence * syncobjs;
1189 uint64_t * syncobj_values;
1190
1191 /* List of relocations for surface states, only used with platforms not
1192 * using softpin.
1193 */
1194 void * surface_states_relocs;
1195
1196 uint32_t cmd_buffer_count;
1197 struct anv_query_pool *perf_query_pool;
1198
1199 /* Indicates whether any of the command buffers have relocations. This
1200 * doesn't necessarily mean we'll need the kernel to process them. It
1201 * might be that a previous execbuf has already placed things in the VMA
1202 * and we can make i915 skip the relocations.
1203 */
1204 bool has_relocs;
1205
1206 const VkAllocationCallbacks * alloc;
1207 VkSystemAllocationScope alloc_scope;
1208
1209 int perf_query_pass;
1210 };
1211
1212 static void
1213 anv_execbuf_finish(struct anv_execbuf *exec)
1214 {
1215 vk_free(exec->alloc, exec->syncobjs);
1216 vk_free(exec->alloc, exec->syncobj_values);
1217 vk_free(exec->alloc, exec->surface_states_relocs);
1218 vk_free(exec->alloc, exec->objects);
1219 vk_free(exec->alloc, exec->bos);
1220 }
1221
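/* Chain an i915_user_extension onto the execbuf. The extension list head
 * reuses the cliprects_ptr field (valid once I915_EXEC_USE_EXTENSIONS is
 * set), so walk to the end of the chain and append there.
 */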
1222 static void
1223 anv_execbuf_add_ext(struct anv_execbuf *exec,
1224 uint32_t ext_name,
1225 struct i915_user_extension *ext)
1226 {
1227 __u64 *iter = &exec->execbuf.cliprects_ptr;
1228
1229 exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
1230
1231 while (*iter != 0) {
1232 iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
1233 }
1234
1235 ext->name = ext_name;
1236
1237 *iter = (uintptr_t) ext;
1238 }
1239
1240 static VkResult
1241 anv_execbuf_add_bo_bitset(struct anv_device *device,
1242 struct anv_execbuf *exec,
1243 uint32_t dep_words,
1244 BITSET_WORD *deps,
1245 uint32_t extra_flags);
1246
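/* Add a BO to the execbuf object list, reusing the existing entry if the BO
 * was already added (tracked via bo->exec_obj_index). If a relocation list
 * is given, also add every BO it references, both through explicit
 * relocation entries and through the dependency bitset.
 */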
1247 static VkResult
1248 anv_execbuf_add_bo(struct anv_device *device,
1249 struct anv_execbuf *exec,
1250 struct anv_bo *bo,
1251 struct anv_reloc_list *relocs,
1252 uint32_t extra_flags)
1253 {
1254 struct drm_i915_gem_exec_object2 *obj = NULL;
1255
1256 bo = anv_bo_unwrap(bo);
1257
1258 if (bo->exec_obj_index < exec->bo_count &&
1259 exec->bos[bo->exec_obj_index] == bo)
1260 obj = &exec->objects[bo->exec_obj_index];
1261
1262 if (obj == NULL) {
1263 /* We've never seen this one before. Add it to the list and assign
1264 * an id that we can use later.
1265 */
1266 if (exec->bo_count >= exec->array_length) {
1267 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1268
1269 struct drm_i915_gem_exec_object2 *new_objects =
1270 vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1271 if (new_objects == NULL)
1272 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1273
1274 struct anv_bo **new_bos =
1275 vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1276 if (new_bos == NULL) {
1277 vk_free(exec->alloc, new_objects);
1278 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1279 }
1280
1281 if (exec->objects) {
1282 memcpy(new_objects, exec->objects,
1283 exec->bo_count * sizeof(*new_objects));
1284 memcpy(new_bos, exec->bos,
1285 exec->bo_count * sizeof(*new_bos));
1286 }
1287
1288 vk_free(exec->alloc, exec->objects);
1289 vk_free(exec->alloc, exec->bos);
1290
1291 exec->objects = new_objects;
1292 exec->bos = new_bos;
1293 exec->array_length = new_len;
1294 }
1295
1296 assert(exec->bo_count < exec->array_length);
1297
1298 bo->exec_obj_index = exec->bo_count++;
1299 obj = &exec->objects[bo->exec_obj_index];
1300 exec->bos[bo->exec_obj_index] = bo;
1301
1302 obj->handle = bo->gem_handle;
1303 obj->relocation_count = 0;
1304 obj->relocs_ptr = 0;
1305 obj->alignment = 0;
1306 obj->offset = bo->offset;
1307 obj->flags = bo->flags | extra_flags;
1308 obj->rsvd1 = 0;
1309 obj->rsvd2 = 0;
1310 }
1311
1312 if (extra_flags & EXEC_OBJECT_WRITE) {
1313 obj->flags |= EXEC_OBJECT_WRITE;
1314 obj->flags &= ~EXEC_OBJECT_ASYNC;
1315 }
1316
1317 if (relocs != NULL) {
1318 assert(obj->relocation_count == 0);
1319
1320 if (relocs->num_relocs > 0) {
1321 /* This is the first time we've ever seen a list of relocations for
1322 * this BO. Go ahead and set the relocations and then walk the list
1323 * of relocations and add them all.
1324 */
1325 exec->has_relocs = true;
1326 obj->relocation_count = relocs->num_relocs;
1327 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1328
1329 for (size_t i = 0; i < relocs->num_relocs; i++) {
1330 VkResult result;
1331
1332 /* A quick sanity check on relocations */
1333 assert(relocs->relocs[i].offset < bo->size);
1334 result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1335 NULL, extra_flags);
1336 if (result != VK_SUCCESS)
1337 return result;
1338 }
1339 }
1340
1341 return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1342 relocs->deps, extra_flags);
1343 }
1344
1345 return VK_SUCCESS;
1346 }
1347
1348 /* Add BO dependencies to execbuf */
1349 static VkResult
1350 anv_execbuf_add_bo_bitset(struct anv_device *device,
1351 struct anv_execbuf *exec,
1352 uint32_t dep_words,
1353 BITSET_WORD *deps,
1354 uint32_t extra_flags)
1355 {
1356 for (uint32_t w = 0; w < dep_words; w++) {
1357 BITSET_WORD mask = deps[w];
1358 while (mask) {
1359 int i = u_bit_scan(&mask);
1360 uint32_t gem_handle = w * BITSET_WORDBITS + i;
1361 struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1362 assert(bo->refcount > 0);
1363 VkResult result =
1364 anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1365 if (result != VK_SUCCESS)
1366 return result;
1367 }
1368 }
1369
1370 return VK_SUCCESS;
1371 }
1372
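/* Now that every BO has been assigned an index in the execbuf object list,
 * rewrite each relocation's target_handle from -1 to that index.
 */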
1373 static void
1374 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1375 struct anv_reloc_list *list)
1376 {
1377 for (size_t i = 0; i < list->num_relocs; i++) {
1378 list->relocs[i].target_handle =
1379 anv_bo_unwrap(list->reloc_bos[i])->exec_obj_index;
1380 }
1381 }
1382
1383 static void
1384 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1385 struct anv_reloc_list *relocs,
1386 uint32_t last_pool_center_bo_offset)
1387 {
1388 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1389 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1390
1391 for (size_t i = 0; i < relocs->num_relocs; i++) {
1392 /* All of the relocations from this block pool to other BO's should
1393 * have been emitted relative to the surface block pool center. We
1394 * need to add the center offset to make them relative to the
1395 * beginning of the actual GEM bo.
1396 */
1397 relocs->relocs[i].offset += delta;
1398 }
1399 }
1400
1401 static void
1402 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1403 struct anv_bo *from_bo,
1404 struct anv_reloc_list *relocs,
1405 uint32_t last_pool_center_bo_offset)
1406 {
1407 assert(!from_bo->is_wrapper);
1408 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1409 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1410
1411 /* When we initially emit relocations into a block pool, we don't
1412 * actually know what the final center_bo_offset will be so we just emit
1413 * it as if center_bo_offset == 0. Now that we know what the center
1414 * offset is, we need to walk the list of relocations and adjust any
1415 * relocations that point to the pool bo with the correct offset.
1416 */
1417 for (size_t i = 0; i < relocs->num_relocs; i++) {
1418 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1419 /* Adjust the delta value in the relocation to correctly
1420 * correspond to the new delta. Initially, this value may have
1421 * been negative (if treated as unsigned), but we trust in
1422 * uint32_t roll-over to fix that for us at this point.
1423 */
1424 relocs->relocs[i].delta += delta;
1425
1426 /* Since the delta has changed, we need to update the actual
1427 * relocated value with the new presumed value. This function
1428 * should only be called on batch buffers, so we know it isn't in
1429 * use by the GPU at the moment.
1430 */
1431 assert(relocs->relocs[i].offset < from_bo->size);
1432 write_reloc(pool->block_pool.device,
1433 from_bo->map + relocs->relocs[i].offset,
1434 relocs->relocs[i].presumed_offset +
1435 relocs->relocs[i].delta, false);
1436 }
1437 }
1438 }
1439
1440 static void
1441 anv_reloc_list_apply(struct anv_device *device,
1442 struct anv_reloc_list *list,
1443 struct anv_bo *bo,
1444 bool always_relocate)
1445 {
1446 bo = anv_bo_unwrap(bo);
1447
1448 for (size_t i = 0; i < list->num_relocs; i++) {
1449 struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1450 if (list->relocs[i].presumed_offset == target_bo->offset &&
1451 !always_relocate)
1452 continue;
1453
1454 void *p = bo->map + list->relocs[i].offset;
1455 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1456 list->relocs[i].presumed_offset = target_bo->offset;
1457 }
1458 }
1459
1460 /**
1461 * This function applies the relocation for a command buffer and writes the
1462 * actual addresses into the buffers as per what we were told by the kernel on
1463 * the previous execbuf2 call. This should be safe to do because, for each
1464 * relocated address, we have two cases:
1465 *
1466 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1467 * not in use by the GPU so updating the address is 100% ok. It won't be
1468 * in-use by the GPU (from our context) again until the next execbuf2
1469 * happens. If the kernel decides to move it in the next execbuf2, it
1470 * will have to do the relocations itself, but that's ok because it should
1471 * have all of the information needed to do so.
1472 *
1473 * 2) The target BO is active (as seen by the kernel). In this case, it
1474 * hasn't moved since the last execbuffer2 call because GTT shuffling
1475 * *only* happens when the BO is idle. (From our perspective, it only
1476 * happens inside the execbuffer2 ioctl, but the shuffling may be
1477 * triggered by another ioctl, with full-ppgtt this is limited to only
1478 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1479 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1480 * address and the relocated value we are writing into the BO will be the
1481 * same as the value that is already there.
1482 *
1483 * There is also a possibility that the target BO is active but the exact
1484 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1485 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1486 * may be stale but it's still safe to write the relocation because that
1487 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1488 * won't be until the next execbuf2 call.
1489 *
1490 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1491 * need to bother. We want to do this because the surface state buffer is
1492 * used by every command buffer so, if the kernel does the relocations, it
1493 * will always be busy and the kernel will always stall. This is also
1494 * probably the fastest mechanism for doing relocations since the kernel would
1495 * otherwise have to make a full copy of all the relocation lists.
1496 */
1497 static bool
1498 execbuf_can_skip_relocations(struct anv_execbuf *exec)
1499 {
1500 if (!exec->has_relocs)
1501 return true;
1502
1503 static int userspace_relocs = -1;
1504 if (userspace_relocs < 0)
1505 userspace_relocs = debug_get_bool_option("ANV_USERSPACE_RELOCS", true);
1506 if (!userspace_relocs)
1507 return false;
1508
1509 /* First, we have to check whether we can even do the relocations.
1510 * New buffers which have never been submitted to the kernel don't have
1511 * a valid offset, so we need to let the kernel do relocations so
1512 * that we can get offsets for them. On future execbuf2 calls, those
1513 * buffers will have offsets and we will be able to skip relocating.
1514 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1515 */
1516 for (uint32_t i = 0; i < exec->bo_count; i++) {
1517 assert(!exec->bos[i]->is_wrapper);
1518 if (exec->bos[i]->offset == (uint64_t)-1)
1519 return false;
1520 }
1521
1522 return true;
1523 }
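/* A minimal usage sketch: userspace relocations can be turned off for
 * debugging so the kernel always does them itself. Assuming the standard
 * Mesa boolean option parsing used above, something like
 *
 *    ANV_USERSPACE_RELOCS=false ./my_vulkan_app
 *
 * would force the kernel-relocation path ("my_vulkan_app" is only a
 * placeholder for whatever application is being debugged).
 */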
1524
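/* Apply all of a command buffer's relocations on the CPU: surface state
 * relocations unconditionally, batch-buffer relocations only where offsets
 * have changed, and then mirror the resulting BO offsets into the kernel's
 * exec objects so the submission can use NO_RELOC.
 */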
1525 static void
1526 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1527 struct anv_execbuf *exec)
1528 {
1529 /* Since surface states are shared between command buffers and we don't
1530 * know what order they will be submitted to the kernel, we don't know
1531 * what address is actually written in the surface state object at any
1532 * given time. The only option is to always relocate them.
1533 */
1534 struct anv_bo *surface_state_bo =
1535 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1536 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1537 surface_state_bo,
1538 true /* always relocate surface states */);
1539
1540 /* Since we own all of the batch buffers, we know what values are stored
1541 * in the relocated addresses and only have to update them if the offsets
1542 * have changed.
1543 */
1544 struct anv_batch_bo **bbo;
1545 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1546 anv_reloc_list_apply(cmd_buffer->device,
1547 &(*bbo)->relocs, (*bbo)->bo, false);
1548 }
1549
1550 for (uint32_t i = 0; i < exec->bo_count; i++)
1551 exec->objects[i].offset = exec->bos[i]->offset;
1552 }
1553
1554 static void
1555 reset_cmd_buffer_surface_offsets(struct anv_cmd_buffer *cmd_buffer)
1556 {
1557 /* In the case where we fall back to doing kernel relocations, we need to
1558 * ensure that the relocation list is valid. All relocations on the batch
1559 * buffers are already valid and kept up-to-date. Since surface states are
1560 * shared between command buffers and we don't know what order they will be
1561 * submitted to the kernel, we don't know what address is actually written
1562 * in the surface state object at any given time. The only option is to set
1563 * a bogus presumed offset and let the kernel relocate them.
1564 */
1565 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1566 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1567 }
1568
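/* Append a DRM syncobj handle and its flags (and, for timelines, its value)
 * to the execbuf's fence array, growing the syncobj and timeline-value
 * arrays on demand.
 */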
1569 static VkResult
1570 anv_execbuf_add_syncobj(struct anv_device *device,
1571 struct anv_execbuf *exec,
1572 uint32_t syncobj,
1573 uint32_t flags,
1574 uint64_t timeline_value)
1575 {
1576 if (exec->syncobj_count >= exec->syncobj_array_length) {
1577 uint32_t new_len = MAX2(exec->syncobj_array_length * 2, 16);
1578
1579 struct drm_i915_gem_exec_fence *new_syncobjs =
1580 vk_alloc(exec->alloc, new_len * sizeof(*new_syncobjs),
1581 8, exec->alloc_scope);
1582 if (!new_syncobjs)
1583 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1584
1585 if (exec->syncobjs)
1586 typed_memcpy(new_syncobjs, exec->syncobjs, exec->syncobj_count);
1587
1588 exec->syncobjs = new_syncobjs;
1589
1590 if (exec->syncobj_values) {
1591 uint64_t *new_syncobj_values =
1592 vk_alloc(exec->alloc, new_len * sizeof(*new_syncobj_values),
1593 8, exec->alloc_scope);
1594 if (!new_syncobj_values)
1595 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1596
1597 typed_memcpy(new_syncobj_values, exec->syncobj_values,
1598 exec->syncobj_count);
1599
1600 exec->syncobj_values = new_syncobj_values;
1601 }
1602
1603 exec->syncobj_array_length = new_len;
1604 }
1605
1606 if (timeline_value && !exec->syncobj_values) {
1607 exec->syncobj_values =
1608 vk_zalloc(exec->alloc, exec->syncobj_array_length *
1609 sizeof(*exec->syncobj_values),
1610 8, exec->alloc_scope);
1611 if (!exec->syncobj_values)
1612 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1613 }
1614
1615 exec->syncobjs[exec->syncobj_count] = (struct drm_i915_gem_exec_fence) {
1616 .handle = syncobj,
1617 .flags = flags,
1618 };
1619 if (exec->syncobj_values)
1620 exec->syncobj_values[exec->syncobj_count] = timeline_value;
1621
1622 exec->syncobj_count++;
1623
1624 return VK_SUCCESS;
1625 }
1626
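/* Add a vk_sync to the execbuf as either a wait or a signal: BO-based syncs
 * are added to the BO list (with EXEC_OBJECT_WRITE when signaling), while
 * DRM syncobjs are added to the fence array.
 */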
1627 static VkResult
1628 anv_execbuf_add_sync(struct anv_device *device,
1629 struct anv_execbuf *execbuf,
1630 struct vk_sync *sync,
1631 bool is_signal,
1632 uint64_t value)
1633 {
1634 /* It's illegal to signal a timeline with value 0 because that's never
1635 * higher than the current value. A timeline wait on value 0 is always
1636 * trivial because any uint64_t value is >= 0.
1637 */
1638 if ((sync->flags & VK_SYNC_IS_TIMELINE) && value == 0)
1639 return VK_SUCCESS;
1640
1641 if (vk_sync_is_anv_bo_sync(sync)) {
1642 struct anv_bo_sync *bo_sync =
1643 container_of(sync, struct anv_bo_sync, sync);
1644
1645 assert(is_signal == (bo_sync->state == ANV_BO_SYNC_STATE_RESET));
1646
1647 return anv_execbuf_add_bo(device, execbuf, bo_sync->bo, NULL,
1648 is_signal ? EXEC_OBJECT_WRITE : 0);
1649 } else if (vk_sync_type_is_drm_syncobj(sync->type)) {
1650 struct vk_drm_syncobj *syncobj = vk_sync_as_drm_syncobj(sync);
1651
1652 if (!(sync->flags & VK_SYNC_IS_TIMELINE))
1653 value = 0;
1654
1655 return anv_execbuf_add_syncobj(device, execbuf, syncobj->syncobj,
1656 is_signal ? I915_EXEC_FENCE_SIGNAL :
1657 I915_EXEC_FENCE_WAIT,
1658 value);
1659 }
1660
1661 unreachable("Invalid sync type");
1662 }
1663
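/* Add a single command buffer's BOs and relocation lists to the execbuf:
 * the surface state pool BO (or its BO dependencies in the softpin case)
 * plus every batch BO the command buffer has touched.
 */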
1664 static VkResult
1665 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1666 struct anv_cmd_buffer *cmd_buffer)
1667 {
1668 struct anv_state_pool *ss_pool =
1669 &cmd_buffer->device->surface_state_pool;
1670
1671 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1672 cmd_buffer->last_ss_pool_center);
1673 VkResult result;
1674 if (anv_use_relocations(cmd_buffer->device->physical)) {
1675 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1676 * will get added automatically by processing relocations on the batch
1677 * buffer. We have to add the surface state BO manually because it has
1678 * relocations of its own that we need to be sure are processed.
1679 */
1680 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1681 ss_pool->block_pool.bo,
1682 &cmd_buffer->surface_relocs, 0);
1683 if (result != VK_SUCCESS)
1684 return result;
1685 } else {
1686 /* Add surface dependencies (BOs) to the execbuf */
1687 result = anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1688 cmd_buffer->surface_relocs.dep_words,
1689 cmd_buffer->surface_relocs.deps, 0);
1690 if (result != VK_SUCCESS)
1691 return result;
1692 }
1693
1694 /* First, we walk over all of the bos we've seen and add them and their
1695 * relocations to the validate list.
1696 */
1697 struct anv_batch_bo **bbo;
1698 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1699 adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
1700 cmd_buffer->last_ss_pool_center);
1701
1702 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1703 (*bbo)->bo, &(*bbo)->relocs, 0);
1704 if (result != VK_SUCCESS)
1705 return result;
1706 }
1707
1708 /* Now that we've adjusted all of the surface state relocations, we need to
1709 * record the surface state pool center so future executions of the command
1710 * buffer can adjust correctly.
1711 */
1712 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1713
1714 return VK_SUCCESS;
1715 }
1716
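/* Chain each of the first N-1 command buffers into its successor and
 * terminate the last one; if the first buffer isn't chainable we must have
 * been given exactly one.
 */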
1717 static void
1718 chain_command_buffers(struct anv_cmd_buffer **cmd_buffers,
1719 uint32_t num_cmd_buffers)
1720 {
1721 if (!anv_cmd_buffer_is_chainable(cmd_buffers[0])) {
1722 assert(num_cmd_buffers == 1);
1723 return;
1724 }
1725
1726 /* Chain the N-1 first batch buffers */
1727 for (uint32_t i = 0; i < (num_cmd_buffers - 1); i++)
1728 anv_cmd_buffer_record_chain_submit(cmd_buffers[i], cmd_buffers[i + 1]);
1729
1730 /* Put an end to the last one */
1731 anv_cmd_buffer_record_end_submit(cmd_buffers[num_cmd_buffers - 1]);
1732 }
1733
1734 static VkResult
1735 setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
1736 struct anv_queue *queue,
1737 struct anv_cmd_buffer **cmd_buffers,
1738 uint32_t num_cmd_buffers)
1739 {
1740 struct anv_device *device = queue->device;
1741 struct anv_state_pool *ss_pool = &device->surface_state_pool;
1742 VkResult result;
1743
1744 /* Edit the tail of the command buffers to chain them all together if they
1745 * can be.
1746 */
1747 chain_command_buffers(cmd_buffers, num_cmd_buffers);
1748
1749 for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1750 anv_measure_submit(cmd_buffers[i]);
1751 result = setup_execbuf_for_cmd_buffer(execbuf, cmd_buffers[i]);
1752 if (result != VK_SUCCESS)
1753 return result;
1754 }
1755
1756 /* Add all the global BOs to the object list for the softpin case. */
1757 if (!anv_use_relocations(device->physical)) {
1758 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1759 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1760 if (result != VK_SUCCESS)
1761 return result;
1762 }
1763
1764 struct anv_block_pool *pool;
1765 pool = &device->dynamic_state_pool.block_pool;
1766 anv_block_pool_foreach_bo(bo, pool) {
1767 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1768 if (result != VK_SUCCESS)
1769 return result;
1770 }
1771
1772 pool = &device->general_state_pool.block_pool;
1773 anv_block_pool_foreach_bo(bo, pool) {
1774 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1775 if (result != VK_SUCCESS)
1776 return result;
1777 }
1778
1779 pool = &device->instruction_state_pool.block_pool;
1780 anv_block_pool_foreach_bo(bo, pool) {
1781 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1782 if (result != VK_SUCCESS)
1783 return result;
1784 }
1785
1786 pool = &device->binding_table_pool.block_pool;
1787 anv_block_pool_foreach_bo(bo, pool) {
1788 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1789 if (result != VK_SUCCESS)
1790 return result;
1791 }
1792
1793 /* Add the BOs for all user-allocated memory objects because we can't
1794 * track them across the update-after-bind updates of VK_EXT_descriptor_indexing.
1795 */
1796 list_for_each_entry(struct anv_device_memory, mem,
1797 &device->memory_objects, link) {
1798 result = anv_execbuf_add_bo(device, execbuf, mem->bo, NULL, 0);
1799 if (result != VK_SUCCESS)
1800 return result;
1801 }
1802 } else {
1803 /* We do not support chaining primary command buffers without
1804 * softpin.
1805 */
1806 assert(num_cmd_buffers == 1);
1807 }
1808
1809 bool no_reloc = true;
1810 if (execbuf->has_relocs) {
1811 no_reloc = execbuf_can_skip_relocations(execbuf);
1812 if (no_reloc) {
1813 /* If we were able to successfully relocate everything, tell the
1814 * kernel that it can skip doing relocations. The requirement for
1815 * using NO_RELOC is:
1816 *
1817 * 1) The addresses written in the objects must match the
1818 * corresponding reloc.presumed_offset which in turn must match
1819 * the corresponding execobject.offset.
1820 *
1821 * 2) To avoid stalling, execobject.offset should match the current
1822 * address of that object within the active context.
1823 *
1824 * In order to satisfy all of the invariants that make userspace
1825 * relocations safe (see relocate_cmd_buffer()), we need to
1826 * further ensure that the addresses we use match those used by the
1827 * kernel for the most recent execbuf2.
1828 *
1829 * The kernel may still choose to do relocations anyway if something
1830 * has moved in the GTT. In this case, the relocation list still
1831 * needs to be valid. All relocations on the batch buffers are
1832 * already valid and kept up-to-date. For surface state relocations,
1833 * by applying the relocations in relocate_cmd_buffer, we ensured
1834 * that the address in the RENDER_SURFACE_STATE matches
1835 * presumed_offset, so it should be safe for the kernel to relocate
1836 * them as needed.
1837 */
1838 for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1839 relocate_cmd_buffer(cmd_buffers[i], execbuf);
1840
1841 anv_reloc_list_apply(device, &cmd_buffers[i]->surface_relocs,
1842 device->surface_state_pool.block_pool.bo,
1843 true /* always relocate surface states */);
1844 }
1845 } else {
1846 /* In the case where we fall back to doing kernel relocations, we
1847 * need to ensure that the relocation list is valid. All relocations
1848 * on the batch buffers are already valid and kept up-to-date. Since
1849 * surface states are shared between command buffers and we don't
1850 * know what order they will be submitted to the kernel, we don't
1851 * know what address is actually written in the surface state object
1852 * at any given time. The only option is to set a bogus presumed
1853 * offset and let the kernel relocate them.
1854 */
1855 for (uint32_t i = 0; i < num_cmd_buffers; i++)
1856 reset_cmd_buffer_surface_offsets(cmd_buffers[i]);
1857 }
1858 }
1859
1860 struct anv_batch_bo *first_batch_bo =
1861 list_first_entry(&cmd_buffers[0]->batch_bos, struct anv_batch_bo, link);
1862
1863 /* The kernel requires that the last entry in the validation list be the
1864 * batch buffer to execute. We can simply swap the element
1865 * corresponding to the first batch_bo in the chain with the last
1866 * element in the list.
1867 */
1868 if (first_batch_bo->bo->exec_obj_index != execbuf->bo_count - 1) {
1869 uint32_t idx = first_batch_bo->bo->exec_obj_index;
1870 uint32_t last_idx = execbuf->bo_count - 1;
1871
1872 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1873 assert(execbuf->bos[idx] == first_batch_bo->bo);
1874
1875 execbuf->objects[idx] = execbuf->objects[last_idx];
1876 execbuf->bos[idx] = execbuf->bos[last_idx];
1877 execbuf->bos[idx]->exec_obj_index = idx;
1878
1879 execbuf->objects[last_idx] = tmp_obj;
1880 execbuf->bos[last_idx] = first_batch_bo->bo;
1881 first_batch_bo->bo->exec_obj_index = last_idx;
1882 }
1883
1884 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1885 if (!anv_use_relocations(device->physical))
1886 assert(!execbuf->has_relocs);
1887
1888 /* Now we go through and fix up all of the relocation lists to point to the
1889 * correct indices in the object array (I915_EXEC_HANDLE_LUT). We have to
1890 * do this after we reorder the list above as some of the indices may have
1891 * changed.
1892 */
1893 struct anv_batch_bo **bbo;
1894 if (execbuf->has_relocs) {
1895 assert(num_cmd_buffers == 1);
1896 u_vector_foreach(bbo, &cmd_buffers[0]->seen_bbos)
1897 anv_cmd_buffer_process_relocs(cmd_buffers[0], &(*bbo)->relocs);
1898
1899 anv_cmd_buffer_process_relocs(cmd_buffers[0], &cmd_buffers[0]->surface_relocs);
1900 }
1901
1902 #ifdef SUPPORT_INTEL_INTEGRATED_GPUS
1903 if (device->physical->memory.need_flush) {
1904 __builtin_ia32_mfence();
1905 for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1906 u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
1907 intel_flush_range_no_fence((*bbo)->bo->map, (*bbo)->length);
1908 }
1909 }
1910 __builtin_ia32_mfence();
1911 }
1912 #endif
1913
1914 struct anv_batch *batch = &cmd_buffers[0]->batch;
1915 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1916 .buffers_ptr = (uintptr_t) execbuf->objects,
1917 .buffer_count = execbuf->bo_count,
1918 .batch_start_offset = 0,
1919 /* On platforms that cannot chain batch buffers because of the i915
1920 * command parser, we have to provide the batch length. Everywhere else
1921 * we'll chain batches, so there's no point in passing a length.
1922 */
1923 .batch_len = device->can_chain_batches ? 0 : batch->next - batch->start,
1924 .cliprects_ptr = 0,
1925 .num_cliprects = 0,
1926 .DR1 = 0,
1927 .DR4 = 0,
1928 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | (no_reloc ? I915_EXEC_NO_RELOC : 0),
1929 .rsvd1 = device->context_id,
1930 .rsvd2 = 0,
1931 };
1932
1933 return VK_SUCCESS;
1934 }
1935
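/* Build an execbuf around the device's trivial batch BO (just
 * MI_BATCH_BUFFER_END plus a NOOP), used when a submission carries no
 * command buffers.
 */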
1936 static VkResult
1937 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue)
1938 {
1939 struct anv_device *device = queue->device;
1940 VkResult result = anv_execbuf_add_bo(device, execbuf,
1941 device->trivial_batch_bo,
1942 NULL, 0);
1943 if (result != VK_SUCCESS)
1944 return result;
1945
1946 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1947 .buffers_ptr = (uintptr_t) execbuf->objects,
1948 .buffer_count = execbuf->bo_count,
1949 .batch_start_offset = 0,
1950 .batch_len = 8, /* GFX7_MI_BATCH_BUFFER_END and NOOP */
1951 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
1952 .rsvd1 = device->context_id,
1953 .rsvd2 = 0,
1954 };
1955
1956 return VK_SUCCESS;
1957 }
1958
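/* Build the execbuf for a utrace flush/copy batch: add the batch BO (moved
 * to the end of the validation list as the kernel requires) and attach
 * flush->sync as a signal fence.
 */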
1959 static VkResult
1960 setup_utrace_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue,
1961 struct anv_utrace_flush_copy *flush)
1962 {
1963 struct anv_device *device = queue->device;
1964 VkResult result = anv_execbuf_add_bo(device, execbuf,
1965 flush->batch_bo,
1966 &flush->relocs, 0);
1967 if (result != VK_SUCCESS)
1968 return result;
1969
1970 result = anv_execbuf_add_sync(device, execbuf, flush->sync,
1971 true /* is_signal */, 0 /* value */);
1972 if (result != VK_SUCCESS)
1973 return result;
1974
1975 if (flush->batch_bo->exec_obj_index != execbuf->bo_count - 1) {
1976 uint32_t idx = flush->batch_bo->exec_obj_index;
1977 uint32_t last_idx = execbuf->bo_count - 1;
1978
1979 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1980 assert(execbuf->bos[idx] == flush->batch_bo);
1981
1982 execbuf->objects[idx] = execbuf->objects[last_idx];
1983 execbuf->bos[idx] = execbuf->bos[last_idx];
1984 execbuf->bos[idx]->exec_obj_index = idx;
1985
1986 execbuf->objects[last_idx] = tmp_obj;
1987 execbuf->bos[last_idx] = flush->batch_bo;
1988 flush->batch_bo->exec_obj_index = last_idx;
1989 }
1990
1991 #ifdef SUPPORT_INTEL_INTEGRATED_GPUS
1992 if (device->physical->memory.need_flush)
1993 intel_flush_range(flush->batch_bo->map, flush->batch_bo->size);
1994 #endif
1995
1996 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1997 .buffers_ptr = (uintptr_t) execbuf->objects,
1998 .buffer_count = execbuf->bo_count,
1999 .batch_start_offset = 0,
2000 .batch_len = flush->batch.next - flush->batch.start,
2001 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_FENCE_ARRAY | queue->exec_flags |
2002 (execbuf->has_relocs ? 0 : I915_EXEC_NO_RELOC),
2003 .rsvd1 = device->context_id,
2004 .rsvd2 = 0,
2005 .num_cliprects = execbuf->syncobj_count,
2006 .cliprects_ptr = (uintptr_t)execbuf->syncobjs,
2007 };
2008
2009 return VK_SUCCESS;
2010 }
2011
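/* Submit the utrace flush/copy batch with its own execbuf2 call and record
 * the offsets the kernel chose for the BOs involved.
 */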
2012 static VkResult
2013 anv_queue_exec_utrace_locked(struct anv_queue *queue,
2014 struct anv_utrace_flush_copy *flush)
2015 {
2016 assert(flush->batch_bo);
2017
2018 struct anv_device *device = queue->device;
2019 struct anv_execbuf execbuf = {
2020 .alloc = &device->vk.alloc,
2021 .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
2022 };
2023
2024 VkResult result = setup_utrace_execbuf(&execbuf, queue, flush);
2025 if (result != VK_SUCCESS)
2026 goto error;
2027
2028 int ret = queue->device->info->no_hw ? 0 :
2029 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
2030 if (ret)
2031 result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
2032
2033 struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
2034 for (uint32_t k = 0; k < execbuf.bo_count; k++) {
2035 if (anv_bo_is_pinned(execbuf.bos[k]))
2036 assert(execbuf.bos[k]->offset == objects[k].offset);
2037 execbuf.bos[k]->offset = objects[k].offset;
2038 }
2039
2040 error:
2041 anv_execbuf_finish(&execbuf);
2042
2043 return result;
2044 }
2045
2046 /* We lock around execbuf for three main reasons:
2047 *
2048 * 1) When a block pool is resized, we create a new gem handle with a
2049 * different size and, in the case of surface states, possibly a different
2050 * center offset but we re-use the same anv_bo struct when we do so. If
2051 * this happens in the middle of setting up an execbuf, we could end up
2052 * with our list of BOs out of sync with our list of gem handles.
2053 *
2054 * 2) The algorithm we use for building the list of unique buffers isn't
2055 * thread-safe. While the client is supposed to synchronize around
2056 * QueueSubmit, this would be extremely difficult to debug if it ever came
2057 * up in the wild due to a broken app. It's better to play it safe and
2058 * just lock around QueueSubmit.
2059 *
2060 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
2061 * userspace. Due to the fact that the surface state buffer is shared
2062 * between batches, we can't afford to have that happen from multiple
2063 * threads at the same time. Even though the user is supposed to ensure
2064 * this doesn't happen, we play it safe as in (2) above.
2065 *
2066 * Since the only other things that ever take the device lock, such as block
2067 * pool resizes, happen only rarely, the lock will almost never be contended,
2068 * so taking it isn't really an expensive operation in this case.
2069 */
2070 static VkResult
2071 anv_queue_exec_locked(struct anv_queue *queue,
2072 uint32_t wait_count,
2073 const struct vk_sync_wait *waits,
2074 uint32_t cmd_buffer_count,
2075 struct anv_cmd_buffer **cmd_buffers,
2076 uint32_t signal_count,
2077 const struct vk_sync_signal *signals,
2078 struct anv_query_pool *perf_query_pool,
2079 uint32_t perf_query_pass)
2080 {
2081 struct anv_device *device = queue->device;
2082 struct anv_utrace_flush_copy *utrace_flush_data = NULL;
2083 struct anv_execbuf execbuf = {
2084 .alloc = &queue->device->vk.alloc,
2085 .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
2086 .perf_query_pass = perf_query_pass,
2087 };
2088
2089 /* Flush the trace points first; they need to be moved */
2090 VkResult result =
2091 anv_device_utrace_flush_cmd_buffers(queue,
2092 cmd_buffer_count,
2093 cmd_buffers,
2094 &utrace_flush_data);
2095 if (result != VK_SUCCESS)
2096 goto error;
2097
2098 if (utrace_flush_data && !utrace_flush_data->batch_bo) {
2099 result = anv_execbuf_add_sync(device, &execbuf,
2100 utrace_flush_data->sync,
2101 true /* is_signal */,
2102 0);
2103 if (result != VK_SUCCESS)
2104 goto error;
2105
2106 utrace_flush_data = NULL;
2107 }
2108
2109 /* Always add the workaround BO as it includes a driver identifier for the
2110 * error_state.
2111 */
2112 result =
2113 anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
2114 if (result != VK_SUCCESS)
2115 goto error;
2116
2117 for (uint32_t i = 0; i < wait_count; i++) {
2118 result = anv_execbuf_add_sync(device, &execbuf,
2119 waits[i].sync,
2120 false /* is_signal */,
2121 waits[i].wait_value);
2122 if (result != VK_SUCCESS)
2123 goto error;
2124 }
2125
2126 for (uint32_t i = 0; i < signal_count; i++) {
2127 result = anv_execbuf_add_sync(device, &execbuf,
2128 signals[i].sync,
2129 true /* is_signal */,
2130 signals[i].signal_value);
2131 if (result != VK_SUCCESS)
2132 goto error;
2133 }
2134
2135 if (queue->sync) {
2136 result = anv_execbuf_add_sync(device, &execbuf,
2137 queue->sync,
2138 true /* is_signal */,
2139 0 /* signal_value */);
2140 if (result != VK_SUCCESS)
2141 goto error;
2142 }
2143
2144 if (cmd_buffer_count) {
2145 result = setup_execbuf_for_cmd_buffers(&execbuf, queue,
2146 cmd_buffers,
2147 cmd_buffer_count);
2148 } else {
2149 result = setup_empty_execbuf(&execbuf, queue);
2150 }
2151
2152 if (result != VK_SUCCESS)
2153 goto error;
2154
2155 const bool has_perf_query =
2156 perf_query_pool && perf_query_pass >= 0 && cmd_buffer_count;
2157
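/* The dumps below are purely diagnostic and are gated on INTEL_DEBUG flags:
 * DEBUG_SUBMIT prints the validation list, DEBUG_BATCH decodes the batches.
 */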
2158 if (INTEL_DEBUG(DEBUG_SUBMIT)) {
2159 fprintf(stderr, "Batch offset=0x%x len=0x%x on queue 0\n",
2160 execbuf.execbuf.batch_start_offset, execbuf.execbuf.batch_len);
2161 for (uint32_t i = 0; i < execbuf.bo_count; i++) {
2162 const struct anv_bo *bo = execbuf.bos[i];
2163
2164 fprintf(stderr, " BO: addr=0x%016"PRIx64"-0x%016"PRIx64" size=0x%010"PRIx64
2165 " handle=%05u name=%s\n",
2166 bo->offset, bo->offset + bo->size - 1, bo->size, bo->gem_handle, bo->name);
2167 }
2168 }
2169
2170 if (INTEL_DEBUG(DEBUG_BATCH)) {
2171 fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
2172 if (cmd_buffer_count) {
2173 if (has_perf_query) {
2174 struct anv_bo *pass_batch_bo = perf_query_pool->bo;
2175 uint64_t pass_batch_offset =
2176 khr_perf_query_preamble_offset(perf_query_pool, perf_query_pass);
2177
2178 intel_print_batch(&device->decoder_ctx,
2179 pass_batch_bo->map + pass_batch_offset, 64,
2180 pass_batch_bo->offset + pass_batch_offset, false);
2181 }
2182
2183 for (uint32_t i = 0; i < cmd_buffer_count; i++) {
2184 struct anv_batch_bo **bo =
2185 u_vector_tail(&cmd_buffers[i]->seen_bbos);
2186 device->cmd_buffer_being_decoded = cmd_buffers[i];
2187 intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
2188 (*bo)->bo->size, (*bo)->bo->offset, false);
2189 device->cmd_buffer_being_decoded = NULL;
2190 }
2191 } else {
2192 intel_print_batch(&device->decoder_ctx,
2193 device->trivial_batch_bo->map,
2194 device->trivial_batch_bo->size,
2195 device->trivial_batch_bo->offset, false);
2196 }
2197 }
2198
2199 if (execbuf.syncobj_values) {
2200 execbuf.timeline_fences.fence_count = execbuf.syncobj_count;
2201 execbuf.timeline_fences.handles_ptr = (uintptr_t)execbuf.syncobjs;
2202 execbuf.timeline_fences.values_ptr = (uintptr_t)execbuf.syncobj_values;
2203 anv_execbuf_add_ext(&execbuf,
2204 DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
2205 &execbuf.timeline_fences.base);
2206 } else if (execbuf.syncobjs) {
2207 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
2208 execbuf.execbuf.num_cliprects = execbuf.syncobj_count;
2209 execbuf.execbuf.cliprects_ptr = (uintptr_t)execbuf.syncobjs;
2210 }
2211
2212 if (has_perf_query) {
2213 assert(perf_query_pass < perf_query_pool->n_passes);
2214 struct intel_perf_query_info *query_info =
2215 perf_query_pool->pass_query[perf_query_pass];
2216
2217 /* Some performance queries use just the pipeline statistics HW; no OA is
2218 * needed in that case, so there is no need to reconfigure.
2219 */
2220 if (!INTEL_DEBUG(DEBUG_NO_OACONFIG) &&
2221 (query_info->kind == INTEL_PERF_QUERY_TYPE_OA ||
2222 query_info->kind == INTEL_PERF_QUERY_TYPE_RAW)) {
2223 int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
2224 (void *)(uintptr_t) query_info->oa_metrics_set_id);
2225 if (ret < 0) {
2226 result = vk_device_set_lost(&device->vk,
2227 "i915-perf config failed: %s",
2228 strerror(errno));
2229 }
2230 }
2231
2232 struct anv_bo *pass_batch_bo = perf_query_pool->bo;
2233
2234 struct drm_i915_gem_exec_object2 query_pass_object = {
2235 .handle = pass_batch_bo->gem_handle,
2236 .offset = pass_batch_bo->offset,
2237 .flags = pass_batch_bo->flags,
2238 };
2239 struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
2240 .buffers_ptr = (uintptr_t) &query_pass_object,
2241 .buffer_count = 1,
2242 .batch_start_offset = khr_perf_query_preamble_offset(perf_query_pool,
2243 perf_query_pass),
2244 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags,
2245 .rsvd1 = device->context_id,
2246 };
2247
2248 int ret = queue->device->info->no_hw ? 0 :
2249 anv_gem_execbuffer(queue->device, &query_pass_execbuf);
2250 if (ret)
2251 result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
2252 }
2253
2254 int ret = queue->device->info->no_hw ? 0 :
2255 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
2256 if (ret)
2257 result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
2258
2259 if (result == VK_SUCCESS && queue->sync) {
2260 result = vk_sync_wait(&device->vk, queue->sync, 0,
2261 VK_SYNC_WAIT_COMPLETE, UINT64_MAX);
2262 if (result != VK_SUCCESS)
2263 result = vk_queue_set_lost(&queue->vk, "sync wait failed");
2264 }
2265
2266 struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
2267 for (uint32_t k = 0; k < execbuf.bo_count; k++) {
2268 if (anv_bo_is_pinned(execbuf.bos[k]))
2269 assert(execbuf.bos[k]->offset == objects[k].offset);
2270 execbuf.bos[k]->offset = objects[k].offset;
2271 }
2272
2273 error:
2274 anv_execbuf_finish(&execbuf);
2275
2276 if (result == VK_SUCCESS && utrace_flush_data)
2277 result = anv_queue_exec_utrace_locked(queue, utrace_flush_data);
2278
2279 return result;
2280 }
2281
2282 static inline bool
2283 can_chain_query_pools(struct anv_query_pool *p1, struct anv_query_pool *p2)
2284 {
2285 return (!p1 || !p2 || p1 == p2);
2286 }
2287
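/* Split the submitted command buffers into runs that can be chained together
 * (chainable batches using at most one perf query pool) and execute each run
 * with anv_queue_exec_locked(), attaching the waits to the first run and the
 * signals to the last.
 */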
2288 static VkResult
2289 anv_queue_submit_locked(struct anv_queue *queue,
2290 struct vk_queue_submit *submit)
2291 {
2292 VkResult result;
2293
2294 if (submit->command_buffer_count == 0) {
2295 result = anv_queue_exec_locked(queue, submit->wait_count, submit->waits,
2296 0 /* cmd_buffer_count */,
2297 NULL /* cmd_buffers */,
2298 submit->signal_count, submit->signals,
2299 NULL /* perf_query_pool */,
2300 0 /* perf_query_pass */);
2301 if (result != VK_SUCCESS)
2302 return result;
2303 } else {
2304 /* Everything's easier if we don't have to bother with container_of() */
2305 STATIC_ASSERT(offsetof(struct anv_cmd_buffer, vk) == 0);
2306 struct vk_command_buffer **vk_cmd_buffers = submit->command_buffers;
2307 struct anv_cmd_buffer **cmd_buffers = (void *)vk_cmd_buffers;
2308 uint32_t start = 0;
2309 uint32_t end = submit->command_buffer_count;
2310 struct anv_query_pool *perf_query_pool =
2311 cmd_buffers[start]->perf_query_pool;
2312 for (uint32_t n = 0; n < end; n++) {
2313 bool can_chain = false;
2314 uint32_t next = n + 1;
2315 /* Can we chain the current buffer into the next one? */
2316 if (next < end &&
2317 anv_cmd_buffer_is_chainable(cmd_buffers[next]) &&
2318 can_chain_query_pools
2319 (cmd_buffers[next]->perf_query_pool, perf_query_pool)) {
2320 can_chain = true;
2321 perf_query_pool =
2322 perf_query_pool ? perf_query_pool :
2323 cmd_buffers[next]->perf_query_pool;
2324 }
2325 if (!can_chain) {
2326 /* The next buffer cannot be chained, or we have reached the
2327 * last buffer; submit what has been chained so far.
2328 */
2329 VkResult result =
2330 anv_queue_exec_locked(queue,
2331 start == 0 ? submit->wait_count : 0,
2332 start == 0 ? submit->waits : NULL,
2333 next - start, &cmd_buffers[start],
2334 next == end ? submit->signal_count : 0,
2335 next == end ? submit->signals : NULL,
2336 perf_query_pool,
2337 submit->perf_pass_index);
2338 if (result != VK_SUCCESS)
2339 return result;
2340 if (next < end) {
2341 start = next;
2342 perf_query_pool = cmd_buffers[start]->perf_query_pool;
2343 }
2344 }
2345 }
2346 }
2347 for (uint32_t i = 0; i < submit->signal_count; i++) {
2348 if (!vk_sync_is_anv_bo_sync(submit->signals[i].sync))
2349 continue;
2350
2351 struct anv_bo_sync *bo_sync =
2352 container_of(submit->signals[i].sync, struct anv_bo_sync, sync);
2353
2354 /* Once the execbuf has returned, we need to set the fence state to
2355 * SUBMITTED. We can't do this before calling execbuf because
2356 * anv_GetFenceStatus does take the global device lock before checking
2357 * fence->state.
2358 *
2359 * We set the fence state to SUBMITTED regardless of whether or not the
2360 * execbuf succeeds because we need to ensure that vkWaitForFences() and
2361 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
2362 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
2363 */
2364 assert(bo_sync->state == ANV_BO_SYNC_STATE_RESET);
2365 bo_sync->state = ANV_BO_SYNC_STATE_SUBMITTED;
2366 }
2367
2368 pthread_cond_broadcast(&queue->device->queue_submit);
2369
2370 return VK_SUCCESS;
2371 }
2372
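/* vk_queue submit entry point: on no_hw devices just signal the syncs,
 * otherwise submit under the device mutex and bracket the work with
 * intel_ds submit timestamps.
 */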
2373 VkResult
2374 anv_queue_submit(struct vk_queue *vk_queue,
2375 struct vk_queue_submit *submit)
2376 {
2377 struct anv_queue *queue = container_of(vk_queue, struct anv_queue, vk);
2378 struct anv_device *device = queue->device;
2379 VkResult result;
2380
2381 if (queue->device->info->no_hw) {
2382 for (uint32_t i = 0; i < submit->signal_count; i++) {
2383 result = vk_sync_signal(&device->vk,
2384 submit->signals[i].sync,
2385 submit->signals[i].signal_value);
2386 if (result != VK_SUCCESS)
2387 return vk_queue_set_lost(&queue->vk, "vk_sync_signal failed");
2388 }
2389 return VK_SUCCESS;
2390 }
2391
2392 uint64_t start_ts = intel_ds_begin_submit(&queue->ds);
2393
2394 pthread_mutex_lock(&device->mutex);
2395 result = anv_queue_submit_locked(queue, submit);
2396 /* Take submission ID under lock */
2397 pthread_mutex_unlock(&device->mutex);
2398
2399 intel_ds_end_submit(&queue->ds, start_ts);
2400
2401 return result;
2402 }
2403
2404 VkResult
2405 anv_queue_submit_simple_batch(struct anv_queue *queue,
2406 struct anv_batch *batch)
2407 {
2408 struct anv_device *device = queue->device;
2409 VkResult result = VK_SUCCESS;
2410 int err;
2411
2412 if (queue->device->info->no_hw)
2413 return VK_SUCCESS;
2414
2415 /* This is only used by device init so we can assume the queue is empty and
2416 * we aren't fighting with a submit thread.
2417 */
2418 assert(vk_queue_is_empty(&queue->vk));
2419
2420 uint32_t batch_size = align(batch->next - batch->start, 8);
2421
2422 struct anv_bo *batch_bo = NULL;
2423 result = anv_bo_pool_alloc(&device->batch_bo_pool, batch_size, &batch_bo);
2424 if (result != VK_SUCCESS)
2425 return result;
2426
2427 memcpy(batch_bo->map, batch->start, batch_size);
2428 #ifdef SUPPORT_INTEL_INTEGRATED_GPUS
2429 if (device->physical->memory.need_flush)
2430 intel_flush_range(batch_bo->map, batch_size);
2431 #endif
2432
2433 struct anv_execbuf execbuf = {
2434 .alloc = &queue->device->vk.alloc,
2435 .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
2436 };
2437
2438 result = anv_execbuf_add_bo(device, &execbuf, batch_bo, NULL, 0);
2439 if (result != VK_SUCCESS)
2440 goto fail;
2441
2442 if (INTEL_DEBUG(DEBUG_BATCH)) {
2443 intel_print_batch(&device->decoder_ctx,
2444 batch_bo->map,
2445 batch_bo->size,
2446 batch_bo->offset, false);
2447 }
2448
2449 execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
2450 .buffers_ptr = (uintptr_t) execbuf.objects,
2451 .buffer_count = execbuf.bo_count,
2452 .batch_start_offset = 0,
2453 .batch_len = batch_size,
2454 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
2455 .rsvd1 = device->context_id,
2456 .rsvd2 = 0,
2457 };
2458
2459 err = anv_gem_execbuffer(device, &execbuf.execbuf);
2460 if (err) {
2461 result = vk_device_set_lost(&device->vk, "anv_gem_execbuffer failed: %m");
2462 goto fail;
2463 }
2464
2465 result = anv_device_wait(device, batch_bo, INT64_MAX);
2466 if (result != VK_SUCCESS) {
2467 result = vk_device_set_lost(&device->vk,
2468 "anv_device_wait failed: %m");
2469 goto fail;
2470 }
2471
2472 fail:
2473 anv_execbuf_finish(&execbuf);
2474 anv_bo_pool_free(&device->batch_bo_pool, batch_bo);
2475
2476 return result;
2477 }
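/* A minimal usage sketch (hypothetical caller, not taken from this file):
 * assuming "data" already holds a batch terminated with MI_BATCH_BUFFER_END,
 * a device-init helper could submit it synchronously like this:
 *
 *    struct anv_batch batch = {
 *       .start = data,
 *       .next  = data + len,
 *       .end   = data + size,
 *    };
 *    VkResult res = anv_queue_submit_simple_batch(queue, &batch);
 */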
2478