1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33 #include "genxml/genX_bits.h"
34 #include "perf/gen_perf.h"
35
36 #include "util/debug.h"
37
38 /** \file anv_batch_chain.c
39 *
40 * This file contains functions related to anv_cmd_buffer as a data
41 * structure. This involves everything required to create and destroy
42 * the actual batch buffers as well as link them together and handle
43 * relocations and surface state. It specifically does *not* contain any
44 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
45 */
46
47 /*-----------------------------------------------------------------------*
48 * Functions related to anv_reloc_list
49 *-----------------------------------------------------------------------*/
50
51 VkResult
52 anv_reloc_list_init(struct anv_reloc_list *list,
53 const VkAllocationCallbacks *alloc)
54 {
55 memset(list, 0, sizeof(*list));
56 return VK_SUCCESS;
57 }
58
59 static VkResult
60 anv_reloc_list_init_clone(struct anv_reloc_list *list,
61 const VkAllocationCallbacks *alloc,
62 const struct anv_reloc_list *other_list)
63 {
64 list->num_relocs = other_list->num_relocs;
65 list->array_length = other_list->array_length;
66
67 if (list->num_relocs > 0) {
68 list->relocs =
69 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
70 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
71 if (list->relocs == NULL)
72 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
73
74 list->reloc_bos =
75 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
76 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
77 if (list->reloc_bos == NULL) {
78 vk_free(alloc, list->relocs);
79 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
80 }
81
82 memcpy(list->relocs, other_list->relocs,
83 list->array_length * sizeof(*list->relocs));
84 memcpy(list->reloc_bos, other_list->reloc_bos,
85 list->array_length * sizeof(*list->reloc_bos));
86 } else {
87 list->relocs = NULL;
88 list->reloc_bos = NULL;
89 }
90
91 list->dep_words = other_list->dep_words;
92
93 if (list->dep_words > 0) {
94 list->deps =
95 vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
96 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
97 memcpy(list->deps, other_list->deps,
98 list->dep_words * sizeof(BITSET_WORD));
99 } else {
100 list->deps = NULL;
101 }
102
103 return VK_SUCCESS;
104 }
105
106 void
107 anv_reloc_list_finish(struct anv_reloc_list *list,
108 const VkAllocationCallbacks *alloc)
109 {
110 vk_free(alloc, list->relocs);
111 vk_free(alloc, list->reloc_bos);
112 vk_free(alloc, list->deps);
113 }
114
115 static VkResult
116 anv_reloc_list_grow(struct anv_reloc_list *list,
117 const VkAllocationCallbacks *alloc,
118 size_t num_additional_relocs)
119 {
120 if (list->num_relocs + num_additional_relocs <= list->array_length)
121 return VK_SUCCESS;
122
123 size_t new_length = MAX2(16, list->array_length * 2);
124 while (new_length < list->num_relocs + num_additional_relocs)
125 new_length *= 2;
126
127 struct drm_i915_gem_relocation_entry *new_relocs =
128 vk_realloc(alloc, list->relocs,
129 new_length * sizeof(*list->relocs), 8,
130 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
131 if (new_relocs == NULL)
132 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
133 list->relocs = new_relocs;
134
135 struct anv_bo **new_reloc_bos =
136 vk_realloc(alloc, list->reloc_bos,
137 new_length * sizeof(*list->reloc_bos), 8,
138 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
139 if (new_reloc_bos == NULL)
140 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
141 list->reloc_bos = new_reloc_bos;
142
143 list->array_length = new_length;
144
145 return VK_SUCCESS;
146 }
147
148 static VkResult
149 anv_reloc_list_grow_deps(struct anv_reloc_list *list,
150 const VkAllocationCallbacks *alloc,
151 uint32_t min_num_words)
152 {
153 if (min_num_words <= list->dep_words)
154 return VK_SUCCESS;
155
156 uint32_t new_length = MAX2(32, list->dep_words * 2);
157 while (new_length < min_num_words)
158 new_length *= 2;
159
160 BITSET_WORD *new_deps =
161 vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
162 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
163 if (new_deps == NULL)
164 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
165 list->deps = new_deps;
166
167 /* Zero out the new data */
168 memset(list->deps + list->dep_words, 0,
169 (new_length - list->dep_words) * sizeof(BITSET_WORD));
170 list->dep_words = new_length;
171
172 return VK_SUCCESS;
173 }
174
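/* Reads x exactly once through a volatile lvalue so the compiler cannot
 * cache or re-read it; it is used below to snapshot bo->offset, which other
 * threads may update between submissions.
 */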
175 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
176
177 VkResult
178 anv_reloc_list_add(struct anv_reloc_list *list,
179 const VkAllocationCallbacks *alloc,
180 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
181 uint64_t *address_u64_out)
182 {
183 struct drm_i915_gem_relocation_entry *entry;
184 int index;
185
186 struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
187 uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
188 if (address_u64_out)
189 *address_u64_out = target_bo_offset + delta;
190
191 assert(unwrapped_target_bo->gem_handle > 0);
192 assert(unwrapped_target_bo->refcount > 0);
193
194 if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
195 assert(!target_bo->is_wrapper);
196 uint32_t idx = unwrapped_target_bo->gem_handle;
197 anv_reloc_list_grow_deps(list, alloc, (idx / BITSET_WORDBITS) + 1);
198 BITSET_SET(list->deps, unwrapped_target_bo->gem_handle);
199 return VK_SUCCESS;
200 }
201
202 VkResult result = anv_reloc_list_grow(list, alloc, 1);
203 if (result != VK_SUCCESS)
204 return result;
205
206 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
207 index = list->num_relocs++;
208 list->reloc_bos[index] = target_bo;
209 entry = &list->relocs[index];
210 entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
211 entry->delta = delta;
212 entry->offset = offset;
213 entry->presumed_offset = target_bo_offset;
214 entry->read_domains = 0;
215 entry->write_domain = 0;
216 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
217
218 return VK_SUCCESS;
219 }
220
221 static void
222 anv_reloc_list_clear(struct anv_reloc_list *list)
223 {
224 list->num_relocs = 0;
225 if (list->dep_words > 0)
226 memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
227 }
228
229 static VkResult
230 anv_reloc_list_append(struct anv_reloc_list *list,
231 const VkAllocationCallbacks *alloc,
232 struct anv_reloc_list *other, uint32_t offset)
233 {
234 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
235 if (result != VK_SUCCESS)
236 return result;
237
238 if (other->num_relocs > 0) {
239 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
240 other->num_relocs * sizeof(other->relocs[0]));
241 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
242 other->num_relocs * sizeof(other->reloc_bos[0]));
243
244 for (uint32_t i = 0; i < other->num_relocs; i++)
245 list->relocs[i + list->num_relocs].offset += offset;
246
247 list->num_relocs += other->num_relocs;
248 }
249
250 anv_reloc_list_grow_deps(list, alloc, other->dep_words);
251 for (uint32_t w = 0; w < other->dep_words; w++)
252 list->deps[w] |= other->deps[w];
253
254 return VK_SUCCESS;
255 }
256
257 /*-----------------------------------------------------------------------*
258 * Functions related to anv_batch
259 *-----------------------------------------------------------------------*/
260
261 void *
262 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
263 {
264 if (batch->next + num_dwords * 4 > batch->end) {
265 VkResult result = batch->extend_cb(batch, batch->user_data);
266 if (result != VK_SUCCESS) {
267 anv_batch_set_error(batch, result);
268 return NULL;
269 }
270 }
271
272 void *p = batch->next;
273
274 batch->next += num_dwords * 4;
275 assert(batch->next <= batch->end);
276
277 return p;
278 }
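/* Usage sketch (illustrative only, hypothetical payload values): callers
 * such as the anv_batch_emit() macro reserve space here and then pack dwords
 * into the returned pointer; a NULL return means extend_cb failed and the
 * batch has already been flagged with an error.
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 2);
 *    if (dw != NULL) {
 *       dw[0] = first_dword;
 *       dw[1] = second_dword;
 *    }
 */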
279
280 uint64_t
281 anv_batch_emit_reloc(struct anv_batch *batch,
282 void *location, struct anv_bo *bo, uint32_t delta)
283 {
284 uint64_t address_u64 = 0;
285 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
286 location - batch->start, bo, delta,
287 &address_u64);
288 if (result != VK_SUCCESS) {
289 anv_batch_set_error(batch, result);
290 return 0;
291 }
292
293 return address_u64;
294 }
295
296 struct anv_address
297 anv_batch_address(struct anv_batch *batch, void *batch_location)
298 {
299 assert(batch->start < batch_location);
300
301 /* Allow a jump at the current location of the batch. */
302 assert(batch->next >= batch_location);
303
304 return anv_address_add(batch->start_addr, batch_location - batch->start);
305 }
306
307 void
308 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
309 {
310 uint32_t size, offset;
311
312 size = other->next - other->start;
313 assert(size % 4 == 0);
314
315 if (batch->next + size > batch->end) {
316 VkResult result = batch->extend_cb(batch, batch->user_data);
317 if (result != VK_SUCCESS) {
318 anv_batch_set_error(batch, result);
319 return;
320 }
321 }
322
323 assert(batch->next + size <= batch->end);
324
325 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
326 memcpy(batch->next, other->start, size);
327
328 offset = batch->next - batch->start;
329 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
330 other->relocs, offset);
331 if (result != VK_SUCCESS) {
332 anv_batch_set_error(batch, result);
333 return;
334 }
335
336 batch->next += size;
337 }
338
339 /*-----------------------------------------------------------------------*
340 * Functions related to anv_batch_bo
341 *-----------------------------------------------------------------------*/
342
343 static VkResult
344 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
345 struct anv_batch_bo **bbo_out)
346 {
347 VkResult result;
348
349 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
350 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
351 if (bbo == NULL)
352 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
353
354 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
355 ANV_CMD_BUFFER_BATCH_SIZE, &bbo->bo);
356 if (result != VK_SUCCESS)
357 goto fail_alloc;
358
359 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
360 if (result != VK_SUCCESS)
361 goto fail_bo_alloc;
362
363 *bbo_out = bbo;
364
365 return VK_SUCCESS;
366
367 fail_bo_alloc:
368 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
369 fail_alloc:
370 vk_free(&cmd_buffer->pool->alloc, bbo);
371
372 return result;
373 }
374
375 static VkResult
376 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
377 const struct anv_batch_bo *other_bbo,
378 struct anv_batch_bo **bbo_out)
379 {
380 VkResult result;
381
382 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
383 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
384 if (bbo == NULL)
385 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
386
387 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
388 other_bbo->bo->size, &bbo->bo);
389 if (result != VK_SUCCESS)
390 goto fail_alloc;
391
392 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
393 &other_bbo->relocs);
394 if (result != VK_SUCCESS)
395 goto fail_bo_alloc;
396
397 bbo->length = other_bbo->length;
398 memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
399 *bbo_out = bbo;
400
401 return VK_SUCCESS;
402
403 fail_bo_alloc:
404 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
405 fail_alloc:
406 vk_free(&cmd_buffer->pool->alloc, bbo);
407
408 return result;
409 }
410
411 static void
412 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
413 size_t batch_padding)
414 {
415 anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
416 bbo->bo->map, bbo->bo->size - batch_padding);
417 batch->relocs = &bbo->relocs;
418 anv_reloc_list_clear(&bbo->relocs);
419 }
420
421 static void
422 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
423 size_t batch_padding)
424 {
425 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
426 batch->start = bbo->bo->map;
427 batch->next = bbo->bo->map + bbo->length;
428 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
429 batch->relocs = &bbo->relocs;
430 }
431
432 static void
433 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
434 {
435 assert(batch->start == bbo->bo->map);
436 bbo->length = batch->next - batch->start;
437 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
438 }
439
440 static VkResult
441 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
442 struct anv_batch *batch, size_t additional,
443 size_t batch_padding)
444 {
445 assert(batch->start == bbo->bo->map);
446 bbo->length = batch->next - batch->start;
447
448 size_t new_size = bbo->bo->size;
449 while (new_size <= bbo->length + additional + batch_padding)
450 new_size *= 2;
451
452 if (new_size == bbo->bo->size)
453 return VK_SUCCESS;
454
455 struct anv_bo *new_bo;
456 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
457 new_size, &new_bo);
458 if (result != VK_SUCCESS)
459 return result;
460
461 memcpy(new_bo->map, bbo->bo->map, bbo->length);
462
463 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
464
465 bbo->bo = new_bo;
466 anv_batch_bo_continue(bbo, batch, batch_padding);
467
468 return VK_SUCCESS;
469 }
470
471 static void
472 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
473 struct anv_batch_bo *prev_bbo,
474 struct anv_batch_bo *next_bbo,
475 uint32_t next_bbo_offset)
476 {
477 const uint32_t bb_start_offset =
478 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
479 ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
480
481 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
482 assert(((*bb_start >> 29) & 0x07) == 0);
483 assert(((*bb_start >> 23) & 0x3f) == 49);
484
485 if (cmd_buffer->device->physical->use_softpin) {
486 assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
487 assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
488
489 write_reloc(cmd_buffer->device,
490 prev_bbo->bo->map + bb_start_offset + 4,
491 next_bbo->bo->offset + next_bbo_offset, true);
492 } else {
493 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
494 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
495
496 prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
497 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
498
499 /* Use a bogus presumed offset to force a relocation */
500 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
501 }
502 }
503
504 static void
505 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
506 struct anv_cmd_buffer *cmd_buffer)
507 {
508 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
509 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
510 vk_free(&cmd_buffer->pool->alloc, bbo);
511 }
512
513 static VkResult
514 anv_batch_bo_list_clone(const struct list_head *list,
515 struct anv_cmd_buffer *cmd_buffer,
516 struct list_head *new_list)
517 {
518 VkResult result = VK_SUCCESS;
519
520 list_inithead(new_list);
521
522 struct anv_batch_bo *prev_bbo = NULL;
523 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
524 struct anv_batch_bo *new_bbo = NULL;
525 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
526 if (result != VK_SUCCESS)
527 break;
528 list_addtail(&new_bbo->link, new_list);
529
530 if (prev_bbo)
531 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
532
533 prev_bbo = new_bbo;
534 }
535
536 if (result != VK_SUCCESS) {
537 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
538 list_del(&bbo->link);
539 anv_batch_bo_destroy(bbo, cmd_buffer);
540 }
541 }
542
543 return result;
544 }
545
546 /*-----------------------------------------------------------------------*
547 * Functions related to anv_cmd_buffer
548 *-----------------------------------------------------------------------*/
549
550 static struct anv_batch_bo *
551 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
552 {
553 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
554 }
555
556 struct anv_address
557 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
558 {
559 struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
560 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
561 return (struct anv_address) {
562 .bo = pool->block_pool.bo,
563 .offset = bt_block->offset - pool->start_offset,
564 };
565 }
566
567 static void
568 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
569 struct anv_bo *bo, uint32_t offset)
570 {
571 /* In gen8+ the address field grew to two dwords to accommodate 48-bit
572 * offsets. The high 16 bits are in the last dword, so we can use the gen8
573 * version in either case, as long as we set the instruction length in the
574 * header accordingly. This means that we always emit three dwords here
575 * and all the padding and adjustment we do in this file works for all
576 * gens.
577 */
578
579 #define GEN7_MI_BATCH_BUFFER_START_length 2
580 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
581
582 const uint32_t gen7_length =
583 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
584 const uint32_t gen8_length =
585 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
586
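   /* Worked numbers (informal sketch): the gen8 MI_BATCH_BUFFER_START packet
    * is 3 dwords with a bias of 2, so gen8_length is 1, while the 2-dword
    * gen7 form gives gen7_length of 0; either value goes straight into
    * DWordLength below.
    */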
587 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
588 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
589 gen7_length : gen8_length;
590 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
591 bbs.AddressSpaceIndicator = ASI_PPGTT;
592 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
593 }
594 }
595
596 static void
597 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
598 struct anv_batch_bo *bbo)
599 {
600 struct anv_batch *batch = &cmd_buffer->batch;
601 struct anv_batch_bo *current_bbo =
602 anv_cmd_buffer_current_batch_bo(cmd_buffer);
603
604 /* We set the end of the batch a little short so we would be sure we
605 * have room for the chaining command. Since we're about to emit the
606 * chaining command, let's set it back where it should go.
607 */
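   /* (GEN8_MI_BATCH_BUFFER_START_length is 3 dwords, so this puts back the
    * 12 bytes of padding that anv_batch_bo_start() reserved.)
    */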
608 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
609 assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
610
611 emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
612
613 anv_batch_bo_finish(current_bbo, batch);
614 }
615
616 static VkResult
617 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
618 {
619 struct anv_cmd_buffer *cmd_buffer = _data;
620 struct anv_batch_bo *new_bbo;
621
622 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
623 if (result != VK_SUCCESS)
624 return result;
625
626 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
627 if (seen_bbo == NULL) {
628 anv_batch_bo_destroy(new_bbo, cmd_buffer);
629 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
630 }
631 *seen_bbo = new_bbo;
632
633 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
634
635 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
636
637 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
638
639 return VK_SUCCESS;
640 }
641
642 static VkResult
643 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
644 {
645 struct anv_cmd_buffer *cmd_buffer = _data;
646 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
647
648 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
649 GEN8_MI_BATCH_BUFFER_START_length * 4);
650
651 return VK_SUCCESS;
652 }
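/* Note: these two callbacks are the possible batch->extend_cb hooks installed
 * by anv_cmd_buffer_init_batch_bo_chain() below. Devices that cannot chain
 * batches grow a single BO in place instead of emitting MI_BATCH_BUFFER_START
 * to jump to a new one.
 */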
653
654 /** Allocate a binding table
655 *
656 * This function allocates a binding table. This is a bit more complicated
657 * than one would think due to a combination of Vulkan driver design and some
658 * unfortunate hardware restrictions.
659 *
660 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
661 * the binding table pointer which means that all binding tables need to live
662 * in the bottom 64k of surface state base address. The way the GL driver has
663 * classically dealt with this restriction is to emit all surface states
664 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
665 * isn't really an option in Vulkan for a couple of reasons:
666 *
667 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
668 * to live in their own buffer and we have to be able to re-emit
669 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
670 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
671 * (it's not that hard to hit 64k of just binding tables), we allocate
672 * surface state objects up-front when VkImageView is created. In order
673 * for this to work, surface state objects need to be allocated from a
674 * global buffer.
675 *
676 * 2) We tried to design the surface state system in such a way that it's
677 * already ready for bindless texturing. The way bindless texturing works
678 * on our hardware is that you have a big pool of surface state objects
679 * (with its own state base address) and the bindless handles are simply
680 * offsets into that pool. With the architecture we chose, we already
681 * have that pool and it's exactly the same pool that we use for regular
682 * surface states so we should already be ready for bindless.
683 *
684 * 3) For render targets, we need to be able to fill out the surface states
685 * later in vkBeginRenderPass so that we can assign clear colors
686 * correctly. One way to do this would be to just create the surface
687 * state data and then repeatedly copy it into the surface state BO every
688 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
689 * rather annoying; it's much simpler to allocate them up-front and
690 * re-use them for the entire render pass.
691 *
692 * While none of these are technically blockers for emitting state on the fly
693 * like we do in GL, the ability to have a single surface state pool
694 * simplifies things greatly. Unfortunately, it comes at a cost...
695 *
696 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
697 * place the binding tables just anywhere in surface state base address.
698 * Because 64k isn't a whole lot of space, we can't simply restrict the
699 * surface state buffer to 64k; we have to be more clever. The solution we've
700 * chosen is to have a block pool with a maximum size of 2G that starts at
701 * zero and grows in both directions. All surface states are allocated from
702 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
703 * binding tables from the bottom of the pool (negative offsets). Every time
704 * we allocate a new binding table block, we set surface state base address to
705 * point to the bottom of the binding table block. This way all of the
706 * binding tables in the block are in the bottom 64k of surface state base
707 * address. When we fill out the binding table, we add the distance between
708 * the bottom of our binding table block and zero of the block pool to the
709 * surface state offsets so that they are correct relative to our new surface
710 * state base address at the bottom of the binding table block.
711 *
712 * \see adjust_relocations_from_state_pool()
713 * \see adjust_relocations_to_state_pool()
714 *
715 * \param[in] entries The number of surface state entries the binding
716 * table should be able to hold.
717 *
718 * \param[out] state_offset The offset from surface state base address
719 * where the surface states live. This must be
720 * added to the surface state offset when it is
721 * written into the binding table entry.
722 *
723 * \return An anv_state representing the binding table
724 */
725 struct anv_state
726 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
727 uint32_t entries, uint32_t *state_offset)
728 {
729 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
730
731 uint32_t bt_size = align_u32(entries * 4, 32);
732
733 struct anv_state state = cmd_buffer->bt_next;
734 if (bt_size > state.alloc_size)
735 return (struct anv_state) { 0 };
736
737 state.alloc_size = bt_size;
738 cmd_buffer->bt_next.offset += bt_size;
739 cmd_buffer->bt_next.map += bt_size;
740 cmd_buffer->bt_next.alloc_size -= bt_size;
741
742 assert(bt_block->offset < 0);
743 *state_offset = -bt_block->offset;
744
745 return state;
746 }
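/* Usage sketch (illustrative, not taken from this file): genX code allocates
 * a table and, when the current block is exhausted (the zero anv_state
 * returned above), grabs a new block, re-emits STATE_BASE_ADDRESS, and
 * retries the allocation:
 *
 *    uint32_t state_offset;
 *    struct anv_state bt =
 *       anv_cmd_buffer_alloc_binding_table(cmd_buffer, entries, &state_offset);
 *    if (bt.map == NULL &&
 *        anv_cmd_buffer_new_binding_table_block(cmd_buffer) == VK_SUCCESS) {
 *       // re-emit STATE_BASE_ADDRESS, then retry the allocation
 *    }
 */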
747
748 struct anv_state
749 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
750 {
751 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
752 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
753 isl_dev->ss.size, isl_dev->ss.align);
754 }
755
756 struct anv_state
757 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
758 uint32_t size, uint32_t alignment)
759 {
760 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
761 size, alignment);
762 }
763
764 VkResult
765 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
766 {
767 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
768 if (bt_block == NULL) {
769 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
770 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
771 }
772
773 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
774
775 /* The bt_next state is a rolling state (we update it as we suballocate
776 * from it) which is relative to the start of the binding table block.
777 */
778 cmd_buffer->bt_next = *bt_block;
779 cmd_buffer->bt_next.offset = 0;
780
781 return VK_SUCCESS;
782 }
783
784 VkResult
785 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
786 {
787 struct anv_batch_bo *batch_bo;
788 VkResult result;
789
790 list_inithead(&cmd_buffer->batch_bos);
791
792 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
793 if (result != VK_SUCCESS)
794 return result;
795
796 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
797
798 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
799 cmd_buffer->batch.user_data = cmd_buffer;
800
801 if (cmd_buffer->device->can_chain_batches) {
802 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
803 } else {
804 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
805 }
806
807 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
808 GEN8_MI_BATCH_BUFFER_START_length * 4);
809
810 int success = u_vector_init(&cmd_buffer->seen_bbos,
811 sizeof(struct anv_bo *),
812 8 * sizeof(struct anv_bo *));
813 if (!success)
814 goto fail_batch_bo;
815
816 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
817
818 /* u_vector requires power-of-two size elements */
819 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
820 success = u_vector_init(&cmd_buffer->bt_block_states,
821 pow2_state_size, 8 * pow2_state_size);
822 if (!success)
823 goto fail_seen_bbos;
824
825 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
826 &cmd_buffer->pool->alloc);
827 if (result != VK_SUCCESS)
828 goto fail_bt_blocks;
829 cmd_buffer->last_ss_pool_center = 0;
830
831 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
832 if (result != VK_SUCCESS)
833 goto fail_bt_blocks;
834
835 return VK_SUCCESS;
836
837 fail_bt_blocks:
838 u_vector_finish(&cmd_buffer->bt_block_states);
839 fail_seen_bbos:
840 u_vector_finish(&cmd_buffer->seen_bbos);
841 fail_batch_bo:
842 anv_batch_bo_destroy(batch_bo, cmd_buffer);
843
844 return result;
845 }
846
847 void
848 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
849 {
850 struct anv_state *bt_block;
851 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
852 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
853 u_vector_finish(&cmd_buffer->bt_block_states);
854
855 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
856
857 u_vector_finish(&cmd_buffer->seen_bbos);
858
859 /* Destroy all of the batch buffers */
860 list_for_each_entry_safe(struct anv_batch_bo, bbo,
861 &cmd_buffer->batch_bos, link) {
862 list_del(&bbo->link);
863 anv_batch_bo_destroy(bbo, cmd_buffer);
864 }
865 }
866
867 void
868 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
869 {
870 /* Delete all but the first batch bo */
871 assert(!list_is_empty(&cmd_buffer->batch_bos));
872 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
873 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
874 list_del(&bbo->link);
875 anv_batch_bo_destroy(bbo, cmd_buffer);
876 }
877 assert(!list_is_empty(&cmd_buffer->batch_bos));
878
879 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
880 &cmd_buffer->batch,
881 GEN8_MI_BATCH_BUFFER_START_length * 4);
882
883 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
884 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
885 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
886 }
887 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
888 cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
889 cmd_buffer->bt_next.offset = 0;
890
891 anv_reloc_list_clear(&cmd_buffer->surface_relocs);
892 cmd_buffer->last_ss_pool_center = 0;
893
894 /* Reset the list of seen buffers */
895 cmd_buffer->seen_bbos.head = 0;
896 cmd_buffer->seen_bbos.tail = 0;
897
898 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
899 anv_cmd_buffer_current_batch_bo(cmd_buffer);
900 }
901
902 void
903 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
904 {
905 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
906
907 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
908 /* When we start a batch buffer, we subtract a certain amount of
909 * padding from the end to ensure that we always have room to emit a
910 * BATCH_BUFFER_START to chain to the next BO. We need to remove
911 * that padding before we end the batch; otherwise, we may end up
912 * with our BATCH_BUFFER_END in another BO.
913 */
914 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
915 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
916
917 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
918
919 /* Round batch up to an even number of dwords. */
920 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
921 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
922
923 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
924 } else {
925 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
926 /* If this is a secondary command buffer, we need to determine the
927 * mode in which it will be executed with vkExecuteCommands. We
928 * determine this statically here so that this stays in sync with the
929 * actual ExecuteCommands implementation.
930 */
931 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
932 if (!cmd_buffer->device->can_chain_batches) {
933 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
934 } else if (cmd_buffer->device->physical->use_call_secondary) {
935 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
936 /* If the secondary command buffer begins & ends in the same BO and
937 * its length is less than the length of CS prefetch, add some NOOP
938 * instructions so the last MI_BATCH_BUFFER_START is outside the CS
939 * prefetch.
940 */
941 if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
942 int32_t batch_len =
943 cmd_buffer->batch.next - cmd_buffer->batch.start;
944
945 for (int32_t i = 0; i < (512 - batch_len); i += 4)
946 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
947 }
948
949 void *jump_addr =
950 anv_batch_emitn(&cmd_buffer->batch,
951 GEN8_MI_BATCH_BUFFER_START_length,
952 GEN8_MI_BATCH_BUFFER_START,
953 .AddressSpaceIndicator = ASI_PPGTT,
954 .SecondLevelBatchBuffer = Firstlevelbatch) +
955 (GEN8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
956 cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
957
958 /* The emit above may have caused us to chain batch buffers which
959 * would mean that batch_bo is no longer valid.
960 */
961 batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
962 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
963 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
964 /* If the secondary has exactly one batch buffer in its list *and*
965 * that batch buffer is less than half of the maximum size, we're
966 * probably better off simply copying it into our batch.
967 */
968 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
969 } else if (!(cmd_buffer->usage_flags &
970 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
971 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
972
973 /* In order to chain, we need this command buffer to contain an
974 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
975 * It doesn't matter where it points now so long as it has a valid
976 * relocation. We'll adjust it later as part of the chaining
977 * process.
978 *
979 * We set the end of the batch a little short so we would be sure we
980 * have room for the chaining command. Since we're about to emit the
981 * chaining command, let's set it back where it should go.
982 */
983 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
984 assert(cmd_buffer->batch.start == batch_bo->bo->map);
985 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
986
987 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
988 assert(cmd_buffer->batch.start == batch_bo->bo->map);
989 } else {
990 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
991 }
992 }
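   /* Recap of the choice above (informal, secondary buffers only):
    * GROW_AND_EMIT when the device cannot chain batches, CALL_AND_RETURN when
    * it supports call/return secondaries, EMIT for a single batch BO shorter
    * than half the maximum size (cheap to copy), CHAIN when we are allowed to
    * patch the tail (no SIMULTANEOUS_USE), and COPY_AND_CHAIN otherwise.
    */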
993
994 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
995 }
996
997 static VkResult
998 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
999 struct list_head *list)
1000 {
1001 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
1002 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
1003 if (bbo_ptr == NULL)
1004 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1005
1006 *bbo_ptr = bbo;
1007 }
1008
1009 return VK_SUCCESS;
1010 }
1011
1012 void
1013 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1014 struct anv_cmd_buffer *secondary)
1015 {
1016 switch (secondary->exec_mode) {
1017 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1018 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1019 break;
1020 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
1021 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
1022 unsigned length = secondary->batch.end - secondary->batch.start;
1023 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1024 GEN8_MI_BATCH_BUFFER_START_length * 4);
1025 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1026 break;
1027 }
1028 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1029 struct anv_batch_bo *first_bbo =
1030 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1031 struct anv_batch_bo *last_bbo =
1032 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1033
1034 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1035
1036 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1037 assert(primary->batch.start == this_bbo->bo->map);
1038 uint32_t offset = primary->batch.next - primary->batch.start;
1039
1040 /* Make the tail of the secondary point back to right after the
1041 * MI_BATCH_BUFFER_START in the primary batch.
1042 */
1043 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1044
1045 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1046 break;
1047 }
1048 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1049 struct list_head copy_list;
1050 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1051 secondary,
1052 &copy_list);
1053 if (result != VK_SUCCESS)
1054 return; /* FIXME */
1055
1056 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1057
1058 struct anv_batch_bo *first_bbo =
1059 list_first_entry(&copy_list, struct anv_batch_bo, link);
1060 struct anv_batch_bo *last_bbo =
1061 list_last_entry(&copy_list, struct anv_batch_bo, link);
1062
1063 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1064
1065 list_splicetail(&copy_list, &primary->batch_bos);
1066
1067 anv_batch_bo_continue(last_bbo, &primary->batch,
1068 GEN8_MI_BATCH_BUFFER_START_length * 4);
1069 break;
1070 }
1071 case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
1072 struct anv_batch_bo *first_bbo =
1073 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1074
1075 uint64_t *write_return_addr =
1076 anv_batch_emitn(&primary->batch,
1077 GEN8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1078 GEN8_MI_STORE_DATA_IMM,
1079 .Address = secondary->return_addr)
1080 + (GEN8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1081
1082 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1083
1084 *write_return_addr =
1085 anv_address_physical(anv_batch_address(&primary->batch,
1086 primary->batch.next));
1087
1088 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1089 break;
1090 }
1091 default:
1092 assert(!"Invalid execution mode");
1093 }
1094
1095 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1096 &secondary->surface_relocs, 0);
1097 }
1098
1099 struct anv_execbuf {
1100 struct drm_i915_gem_execbuffer2 execbuf;
1101
1102 struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
1103
1104 struct drm_i915_gem_exec_object2 * objects;
1105 uint32_t bo_count;
1106 struct anv_bo ** bos;
1107
1108 /* Allocated length of the 'objects' and 'bos' arrays */
1109 uint32_t array_length;
1110
1111 bool has_relocs;
1112
1113 const VkAllocationCallbacks * alloc;
1114 VkSystemAllocationScope alloc_scope;
1115
1116 int perf_query_pass;
1117 };
1118
1119 static void
1120 anv_execbuf_init(struct anv_execbuf *exec)
1121 {
1122 memset(exec, 0, sizeof(*exec));
1123 }
1124
1125 static void
1126 anv_execbuf_finish(struct anv_execbuf *exec)
1127 {
1128 vk_free(exec->alloc, exec->objects);
1129 vk_free(exec->alloc, exec->bos);
1130 }
1131
1132 static void
1133 anv_execbuf_add_ext(struct anv_execbuf *exec,
1134 uint32_t ext_name,
1135 struct i915_user_extension *ext)
1136 {
1137 __u64 *iter = &exec->execbuf.cliprects_ptr;
1138
1139 exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
1140
1141 while (*iter != 0) {
1142 iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
1143 }
1144
1145 ext->name = ext_name;
1146
1147 *iter = (uintptr_t) ext;
1148 }
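/* Example (sketch, assuming kernel support for the extension and a local
 * struct anv_execbuf named execbuf): chaining the timeline-fences extension
 * declared in struct anv_execbuf above:
 *
 *    anv_execbuf_add_ext(&execbuf,
 *                        DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *                        &execbuf.timeline_fences.base);
 */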
1149
1150 static VkResult
1151 anv_execbuf_add_bo_bitset(struct anv_device *device,
1152 struct anv_execbuf *exec,
1153 uint32_t dep_words,
1154 BITSET_WORD *deps,
1155 uint32_t extra_flags);
1156
1157 static VkResult
1158 anv_execbuf_add_bo(struct anv_device *device,
1159 struct anv_execbuf *exec,
1160 struct anv_bo *bo,
1161 struct anv_reloc_list *relocs,
1162 uint32_t extra_flags)
1163 {
1164 struct drm_i915_gem_exec_object2 *obj = NULL;
1165
1166 bo = anv_bo_unwrap(bo);
1167
1168 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1169 obj = &exec->objects[bo->index];
1170
1171 if (obj == NULL) {
1172 /* We've never seen this one before. Add it to the list and assign
1173 * an id that we can use later.
1174 */
1175 if (exec->bo_count >= exec->array_length) {
1176 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1177
1178 struct drm_i915_gem_exec_object2 *new_objects =
1179 vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1180 if (new_objects == NULL)
1181 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1182
1183 struct anv_bo **new_bos =
1184 vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1185 if (new_bos == NULL) {
1186 vk_free(exec->alloc, new_objects);
1187 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1188 }
1189
1190 if (exec->objects) {
1191 memcpy(new_objects, exec->objects,
1192 exec->bo_count * sizeof(*new_objects));
1193 memcpy(new_bos, exec->bos,
1194 exec->bo_count * sizeof(*new_bos));
1195 }
1196
1197 vk_free(exec->alloc, exec->objects);
1198 vk_free(exec->alloc, exec->bos);
1199
1200 exec->objects = new_objects;
1201 exec->bos = new_bos;
1202 exec->array_length = new_len;
1203 }
1204
1205 assert(exec->bo_count < exec->array_length);
1206
1207 bo->index = exec->bo_count++;
1208 obj = &exec->objects[bo->index];
1209 exec->bos[bo->index] = bo;
1210
1211 obj->handle = bo->gem_handle;
1212 obj->relocation_count = 0;
1213 obj->relocs_ptr = 0;
1214 obj->alignment = 0;
1215 obj->offset = bo->offset;
1216 obj->flags = bo->flags | extra_flags;
1217 obj->rsvd1 = 0;
1218 obj->rsvd2 = 0;
1219 }
1220
1221 if (extra_flags & EXEC_OBJECT_WRITE) {
1222 obj->flags |= EXEC_OBJECT_WRITE;
1223 obj->flags &= ~EXEC_OBJECT_ASYNC;
1224 }
1225
1226 if (relocs != NULL) {
1227 assert(obj->relocation_count == 0);
1228
1229 if (relocs->num_relocs > 0) {
1230 /* This is the first time we've ever seen a list of relocations for
1231 * this BO. Go ahead and set the relocations and then walk the list
1232 * of relocations and add them all.
1233 */
1234 exec->has_relocs = true;
1235 obj->relocation_count = relocs->num_relocs;
1236 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1237
1238 for (size_t i = 0; i < relocs->num_relocs; i++) {
1239 VkResult result;
1240
1241 /* A quick sanity check on relocations */
1242 assert(relocs->relocs[i].offset < bo->size);
1243 result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1244 NULL, extra_flags);
1245 if (result != VK_SUCCESS)
1246 return result;
1247 }
1248 }
1249
1250 return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1251 relocs->deps, extra_flags);
1252 }
1253
1254 return VK_SUCCESS;
1255 }
1256
1257 /* Add BO dependencies to execbuf */
1258 static VkResult
1259 anv_execbuf_add_bo_bitset(struct anv_device *device,
1260 struct anv_execbuf *exec,
1261 uint32_t dep_words,
1262 BITSET_WORD *deps,
1263 uint32_t extra_flags)
1264 {
1265 for (uint32_t w = 0; w < dep_words; w++) {
1266 BITSET_WORD mask = deps[w];
1267 while (mask) {
1268 int i = u_bit_scan(&mask);
1269 uint32_t gem_handle = w * BITSET_WORDBITS + i;
1270 struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1271 assert(bo->refcount > 0);
1272 VkResult result =
1273 anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1274 if (result != VK_SUCCESS)
1275 return result;
1276 }
1277 }
1278
1279 return VK_SUCCESS;
1280 }
1281
1282 static void
1283 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1284 struct anv_reloc_list *list)
1285 {
1286 for (size_t i = 0; i < list->num_relocs; i++)
1287 list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
1288 }
1289
1290 static void
1291 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1292 struct anv_reloc_list *relocs,
1293 uint32_t last_pool_center_bo_offset)
1294 {
1295 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1296 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1297
1298 for (size_t i = 0; i < relocs->num_relocs; i++) {
1299 /* All of the relocations from this block pool to other BO's should
1300 * have been emitted relative to the surface block pool center. We
1301 * need to add the center offset to make them relative to the
1302 * beginning of the actual GEM bo.
1303 */
1304 relocs->relocs[i].offset += delta;
1305 }
1306 }
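/* Worked example (hypothetical numbers): if the surface state block pool's
 * center_bo_offset grew from 4096 to 8192 since this command buffer last
 * recorded its center, delta is 4096 and every relocation offset above is
 * shifted by 4096 so it is once again relative to byte 0 of the pool's
 * (re-centered) GEM BO.
 */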
1307
1308 static void
1309 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1310 struct anv_bo *from_bo,
1311 struct anv_reloc_list *relocs,
1312 uint32_t last_pool_center_bo_offset)
1313 {
1314 assert(!from_bo->is_wrapper);
1315 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1316 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1317
1318 /* When we initially emit relocations into a block pool, we don't
1319 * actually know what the final center_bo_offset will be so we just emit
1320 * it as if center_bo_offset == 0. Now that we know what the center
1321 * offset is, we need to walk the list of relocations and adjust any
1322 * relocations that point to the pool bo with the correct offset.
1323 */
1324 for (size_t i = 0; i < relocs->num_relocs; i++) {
1325 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1326 /* Adjust the delta value in the relocation to correctly
1327 * correspond to the new delta. Initially, this value may have
1328 * been negative (if treated as unsigned), but we trust in
1329 * uint32_t roll-over to fix that for us at this point.
1330 */
1331 relocs->relocs[i].delta += delta;
1332
1333 /* Since the delta has changed, we need to update the actual
1334 * relocated value with the new presumed value. This function
1335 * should only be called on batch buffers, so we know it isn't in
1336 * use by the GPU at the moment.
1337 */
1338 assert(relocs->relocs[i].offset < from_bo->size);
1339 write_reloc(pool->block_pool.device,
1340 from_bo->map + relocs->relocs[i].offset,
1341 relocs->relocs[i].presumed_offset +
1342 relocs->relocs[i].delta, false);
1343 }
1344 }
1345 }
1346
1347 static void
1348 anv_reloc_list_apply(struct anv_device *device,
1349 struct anv_reloc_list *list,
1350 struct anv_bo *bo,
1351 bool always_relocate)
1352 {
1353 bo = anv_bo_unwrap(bo);
1354
1355 for (size_t i = 0; i < list->num_relocs; i++) {
1356 struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1357 if (list->relocs[i].presumed_offset == target_bo->offset &&
1358 !always_relocate)
1359 continue;
1360
1361 void *p = bo->map + list->relocs[i].offset;
1362 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1363 list->relocs[i].presumed_offset = target_bo->offset;
1364 }
1365 }
1366
1367 /**
1368 * This function applies the relocation for a command buffer and writes the
1369 * actual addresses into the buffers as per what we were told by the kernel on
1370 * the previous execbuf2 call. This should be safe to do because, for each
1371 * relocated address, we have two cases:
1372 *
1373 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1374 * not in use by the GPU so updating the address is 100% ok. It won't be
1375 * in-use by the GPU (from our context) again until the next execbuf2
1376 * happens. If the kernel decides to move it in the next execbuf2, it
1377 * will have to do the relocations itself, but that's ok because it should
1378 * have all of the information needed to do so.
1379 *
1380 * 2) The target BO is active (as seen by the kernel). In this case, it
1381 * hasn't moved since the last execbuffer2 call because GTT shuffling
1382 * *only* happens when the BO is idle. (From our perspective, it only
1383 * happens inside the execbuffer2 ioctl, but the shuffling may be
1384 * triggered by another ioctl, with full-ppgtt this is limited to only
1385 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1386 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1387 * address and the relocated value we are writing into the BO will be the
1388 * same as the value that is already there.
1389 *
1390 * There is also a possibility that the target BO is active but the exact
1391 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1392 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1393 * may be stale but it's still safe to write the relocation because that
1394 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1395 * won't be until the next execbuf2 call.
1396 *
1397 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1398 * need to bother. We want to do this because the surface state buffer is
1399 * used by every command buffer so, if the kernel does the relocations, it
1400 * will always be busy and the kernel will always stall. This is also
1401 * probably the fastest mechanism for doing relocations since the kernel would
1402 * have to make a full copy of all the relocation lists.
1403 */
1404 static bool
1405 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1406 struct anv_execbuf *exec)
1407 {
1408 if (!exec->has_relocs)
1409 return true;
1410
1411 static int userspace_relocs = -1;
1412 if (userspace_relocs < 0)
1413 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1414 if (!userspace_relocs)
1415 return false;
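   /* Debug sketch: running an application with ANV_USERSPACE_RELOCS=false
    * takes this early-out and leaves all relocations to the kernel, which can
    * help when diagnosing bad presumed offsets.
    */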
1416
1417 /* First, we have to check to see whether or not we can even do the
1418 * relocation. New buffers which have never been submitted to the kernel
1419 * don't have a valid offset so we need to let the kernel do relocations so
1420 * that we can get offsets for them. On future execbuf2 calls, those
1421 * buffers will have offsets and we will be able to skip relocating.
1422 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1423 */
1424 for (uint32_t i = 0; i < exec->bo_count; i++) {
1425 assert(!exec->bos[i]->is_wrapper);
1426 if (exec->bos[i]->offset == (uint64_t)-1)
1427 return false;
1428 }
1429
1430 /* Since surface states are shared between command buffers and we don't
1431 * know what order they will be submitted to the kernel, we don't know
1432 * what address is actually written in the surface state object at any
1433 * given time. The only option is to always relocate them.
1434 */
1435 struct anv_bo *surface_state_bo =
1436 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1437 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1438 surface_state_bo,
1439 true /* always relocate surface states */);
1440
1441 /* Since we own all of the batch buffers, we know what values are stored
1442 * in the relocated addresses and only have to update them if the offsets
1443 * have changed.
1444 */
1445 struct anv_batch_bo **bbo;
1446 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1447 anv_reloc_list_apply(cmd_buffer->device,
1448 &(*bbo)->relocs, (*bbo)->bo, false);
1449 }
1450
1451 for (uint32_t i = 0; i < exec->bo_count; i++)
1452 exec->objects[i].offset = exec->bos[i]->offset;
1453
1454 return true;
1455 }
1456
1457 static VkResult
1458 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1459 struct anv_cmd_buffer *cmd_buffer)
1460 {
1461 struct anv_batch *batch = &cmd_buffer->batch;
1462 struct anv_state_pool *ss_pool =
1463 &cmd_buffer->device->surface_state_pool;
1464
1465 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1466 cmd_buffer->last_ss_pool_center);
1467 VkResult result;
   if (cmd_buffer->device->physical->use_softpin) {
      anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }
      /* Add surface dependencies (BOs) to the execbuf */
      anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
                                cmd_buffer->surface_relocs.dep_words,
                                cmd_buffer->surface_relocs.deps, 0);

      /* Add the BOs for all memory objects */
      list_for_each_entry(struct anv_device_memory, mem,
                          &cmd_buffer->device->memory_objects, link) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     mem->bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      struct anv_block_pool *pool;
      pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->instruction_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->binding_table_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS
       * BOs will get added automatically by processing relocations on the
       * batch buffer.  We have to add the surface state BO manually because
       * it has relocations of its own that we need to be sure are processed.
       */
      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  ss_pool->block_pool.bo,
                                  &cmd_buffer->surface_relocs, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
                                       cmd_buffer->last_ss_pool_center);

      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  (*bbo)->bo, &(*bbo)->relocs, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   /* Now that we've adjusted all of the surface state relocations, we need to
    * record the surface state pool center so future executions of the command
    * buffer can adjust correctly.
    */
   cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
      uint32_t idx = first_batch_bo->bo->index;
      uint32_t last_idx = execbuf->bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
      assert(execbuf->bos[idx] == first_batch_bo->bo);

      execbuf->objects[idx] = execbuf->objects[last_idx];
      execbuf->bos[idx] = execbuf->bos[last_idx];
      execbuf->bos[idx]->index = idx;

      execbuf->objects[last_idx] = tmp_obj;
      execbuf->bos[last_idx] = first_batch_bo->bo;
      first_batch_bo->bo->index = last_idx;
   }

   /* If we are pinning our BOs, we shouldn't have to relocate anything */
   if (cmd_buffer->device->physical->use_softpin)
      assert(!execbuf->has_relocs);

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   if (execbuf->has_relocs) {
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
         anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

      anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
   }

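   /* On platforms without LLC, the CPU writes we made to the batch may still
    * be sitting in the CPU cache, so flush every cacheline of each batch BO
    * (after a memory fence) before the GPU reads it.
    */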
   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo->map + i);
      }
   }

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
      /* If we were able to successfully relocate everything, tell the kernel
       * that it can skip doing relocations. The requirement for using
       * NO_RELOC is:
       *
       *  1) The addresses written in the objects must match the corresponding
       *     reloc.presumed_offset which in turn must match the corresponding
       *     execobject.offset.
       *
       *  2) To avoid stalling, execobject.offset should match the current
       *     address of that object within the active context.
       *
       * In order to satisfy all of the invariants that make userspace
       * relocations safe (see relocate_cmd_buffer()), we need to further
       * ensure that the addresses we use match those used by the kernel for
       * the most recent execbuf2.
       *
       * The kernel may still choose to do relocations anyway if something has
       * moved in the GTT. In this case, the relocation list still needs to be
       * valid. All relocations on the batch buffers are already valid and
       * kept up-to-date. For surface state relocations, by applying the
       * relocations in relocate_cmd_buffer, we ensured that the address in
       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
       * safe for the kernel to relocate them as needed.
       */
      execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
   } else {
      /* In the case where we fall back to doing kernel relocations, we need
       * to ensure that the relocation list is valid. All relocations on the
       * batch buffers are already valid and kept up-to-date. Since surface
       * states are shared between command buffers and we don't know what
       * order they will be submitted to the kernel, we don't know what
       * address is actually written in the surface state object at any given
       * time. The only option is to set a bogus presumed offset and let the
       * kernel relocate them.
       */
      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
   }

   return VK_SUCCESS;
}

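/* Used for submits that carry neither a command buffer nor a simple BO
 * (e.g. a fence-only submit): execute the device's pre-built trivial batch,
 * which ends immediately.
 */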
static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
{
   VkResult result = anv_execbuf_add_bo(device, execbuf,
                                        device->trivial_batch_bo,
                                        NULL, 0);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

/* We lock around execbuf for three main reasons:
 *
 * 1) When a block pool is resized, we create a new gem handle with a
 *    different size and, in the case of surface states, possibly a different
 *    center offset but we re-use the same anv_bo struct when we do so. If
 *    this happens in the middle of setting up an execbuf, we could end up
 *    with our list of BOs out of sync with our list of gem handles.
 *
 * 2) The algorithm we use for building the list of unique buffers isn't
 *    thread-safe. While the client is supposed to synchronize around
 *    QueueSubmit, this would be extremely difficult to debug if it ever came
 *    up in the wild due to a broken app. It's better to play it safe and
 *    just lock around QueueSubmit.
 *
 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
 *    userspace. Due to the fact that the surface state buffer is shared
 *    between batches, we can't afford to have that happen from multiple
 *    threads at the same time. Even though the user is supposed to ensure
 *    this doesn't happen, we play it safe as in (2) above.
 *
 * Since the only other things that ever take the device lock, such as block
 * pool resizes, happen only rarely, the lock will almost never be contended,
 * so taking it isn't really an expensive operation in this case.
 */
VkResult
anv_queue_execbuf_locked(struct anv_queue *queue,
                         struct anv_queue_submit *submit)
{
   struct anv_device *device = queue->device;
   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);
   execbuf.alloc = submit->alloc;
   execbuf.alloc_scope = submit->alloc_scope;
   execbuf.perf_query_pass = submit->perf_query_pass;

   /* Always add the workaround BO as it includes a driver identifier for the
    * error_state.
    */
   VkResult result =
      anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
   if (result != VK_SUCCESS)
      goto error;

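   /* Each fence_bos entry is a pointer with a "signaled" flag packed into its
    * low bit.  BOs backing fences we are going to signal are added with
    * EXEC_OBJECT_WRITE so the kernel treats them as written by this batch.
    */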
   for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
      int signaled;
      struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);

      result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
                                  signaled ? EXEC_OBJECT_WRITE : 0);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (submit->cmd_buffer) {
      result = setup_execbuf_for_cmd_buffer(&execbuf, submit->cmd_buffer);
   } else if (submit->simple_bo) {
      result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
      if (result != VK_SUCCESS)
         goto error;

      execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
         .buffers_ptr = (uintptr_t) execbuf.objects,
         .buffer_count = execbuf.bo_count,
         .batch_start_offset = 0,
         .batch_len = submit->simple_bo_size,
         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
         .rsvd1 = device->context_id,
         .rsvd2 = 0,
      };
   } else {
      result = setup_empty_execbuf(&execbuf, queue->device);
   }

   if (result != VK_SUCCESS)
      goto error;

   const bool has_perf_query =
      submit->perf_query_pass >= 0 &&
      submit->cmd_buffer &&
      submit->cmd_buffer->perf_query_pool;

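   /* With batch debugging enabled, decode and print whatever we are about to
    * submit: the perf-query preamble (if any), the command buffer's batch,
    * the simple BO, or the trivial batch.
    */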
   if (INTEL_DEBUG & DEBUG_BATCH) {
      if (submit->cmd_buffer) {
         if (has_perf_query) {
            struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
            struct anv_bo *pass_batch_bo = query_pool->bo;
            uint64_t pass_batch_offset =
               khr_perf_query_preamble_offset(query_pool,
                                              submit->perf_query_pass);

            gen_print_batch(&device->decoder_ctx,
                            pass_batch_bo->map + pass_batch_offset, 64,
                            pass_batch_bo->offset + pass_batch_offset, false);
         }

         struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
         device->cmd_buffer_being_decoded = submit->cmd_buffer;
         gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
                         (*bo)->bo->size, (*bo)->bo->offset, false);
         device->cmd_buffer_being_decoded = NULL;
      } else if (submit->simple_bo) {
         gen_print_batch(&device->decoder_ctx, submit->simple_bo->map,
                         submit->simple_bo->size, submit->simple_bo->offset, false);
      } else {
         gen_print_batch(&device->decoder_ctx,
                         device->trivial_batch_bo->map,
                         device->trivial_batch_bo->size,
                         device->trivial_batch_bo->offset, false);
      }
   }

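   /* Attach the DRM syncobjs for this submit.  With the thread-submit path we
    * use the timeline-fences execbuf extension; otherwise we fall back to
    * I915_EXEC_FENCE_ARRAY, which reuses the cliprects fields to pass the
    * fence array.
    */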
   if (submit->fence_count > 0) {
      assert(device->physical->has_syncobj);
      if (device->has_thread_submit) {
         execbuf.timeline_fences.fence_count = submit->fence_count;
         execbuf.timeline_fences.handles_ptr = (uintptr_t)submit->fences;
         execbuf.timeline_fences.values_ptr = (uintptr_t)submit->fence_values;
         anv_execbuf_add_ext(&execbuf,
                             DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
                             &execbuf.timeline_fences.base);
      } else {
         execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
         execbuf.execbuf.num_cliprects = submit->fence_count;
         execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
      }
   }

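   /* i915 packs sync_file fds into rsvd2: with I915_EXEC_FENCE_IN the input
    * fence fd goes in the low 32 bits, and with I915_EXEC_FENCE_OUT the
    * kernel returns the output fence fd in the high 32 bits (read back after
    * the execbuf below).
    */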
   if (submit->in_fence != -1) {
      assert(!device->has_thread_submit);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
      execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
   }

   if (submit->need_out_fence) {
      assert(!device->has_thread_submit);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   if (has_perf_query) {
      struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
      assert(submit->perf_query_pass < query_pool->n_passes);
      struct gen_perf_query_info *query_info =
         query_pool->pass_query[submit->perf_query_pass];

      /* Some performance queries just use the pipeline statistics HW, no
       * need for OA in that case, so no need to reconfigure.
       */
      if ((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0 &&
          (query_info->kind == GEN_PERF_QUERY_TYPE_OA ||
           query_info->kind == GEN_PERF_QUERY_TYPE_RAW)) {
         int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
                             (void *)(uintptr_t) query_info->oa_metrics_set_id);
         if (ret < 0) {
            result = anv_device_set_lost(device,
                                         "i915-perf config failed: %s",
                                         strerror(errno));
         }
      }

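      /* Submit the query pool's preamble batch for this pass as its own
       * execbuf ahead of the main workload below.
       */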
      struct anv_bo *pass_batch_bo = query_pool->bo;

      struct drm_i915_gem_exec_object2 query_pass_object = {
         .handle = pass_batch_bo->gem_handle,
         .offset = pass_batch_bo->offset,
         .flags = pass_batch_bo->flags,
      };
      struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
         .buffers_ptr = (uintptr_t) &query_pass_object,
         .buffer_count = 1,
         .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
                                                              submit->perf_query_pass),
         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
         .rsvd1 = device->context_id,
      };

      int ret = queue->device->no_hw ? 0 :
         anv_gem_execbuffer(queue->device, &query_pass_execbuf);
      if (ret)
         result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
   }

   int ret = queue->device->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
   if (ret)
      result = anv_queue_set_lost(queue, "execbuf2 failed: %m");

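   /* The kernel writes the final GTT offset of every object back into the
    * exec objects array.  Record those offsets on our BOs so later submits
    * can use them as presumed offsets; pinned BOs must not have moved.
    */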
   struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
   for (uint32_t k = 0; k < execbuf.bo_count; k++) {
      if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf.bos[k]->offset == objects[k].offset);
      execbuf.bos[k]->offset = objects[k].offset;
   }

   if (result == VK_SUCCESS && submit->need_out_fence)
      submit->out_fence = execbuf.execbuf.rsvd2 >> 32;

 error:
   pthread_cond_broadcast(&device->queue_submit);

   anv_execbuf_finish(&execbuf);

   return result;
}