1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 
30 #include "anv_private.h"
31 
32 #include "genxml/gen8_pack.h"
33 
34 #include "util/debug.h"
35 
36 /** \file anv_batch_chain.c
37  *
38  * This file contains functions related to anv_cmd_buffer as a data
39  * structure.  This involves everything required to create and destroy
40  * the actual batch buffers as well as link them together and handle
41  * relocations and surface state.  It specifically does *not* contain any
42  * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
43  */
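/* Editor's note: a minimal sketch of the data structure described above,
 * assuming the declarations from anv_private.h.  A command buffer owns a
 * chained list of anv_batch_bo's, each wrapping a GEM BO plus the relocation
 * list for the commands written into it.  The helper name below is
 * hypothetical and exists only for illustration.
 */
static inline uint32_t
example_total_batch_length(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t total = 0;
   /* Walk every batch BO in the chain and sum the bytes actually emitted. */
   list_for_each_entry(struct anv_batch_bo, bbo, &cmd_buffer->batch_bos, link)
      total += bbo->length;
   return total;
}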
44 
45 /*-----------------------------------------------------------------------*
46  * Functions related to anv_reloc_list
47  *-----------------------------------------------------------------------*/
48 
49 static VkResult
50 anv_reloc_list_init_clone(struct anv_reloc_list *list,
51                           const VkAllocationCallbacks *alloc,
52                           const struct anv_reloc_list *other_list)
53 {
54    if (other_list) {
55       list->num_relocs = other_list->num_relocs;
56       list->array_length = other_list->array_length;
57    } else {
58       list->num_relocs = 0;
59       list->array_length = 256;
60    }
61 
62    list->relocs =
63       vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
64                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
65 
66    if (list->relocs == NULL)
67       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
68 
69    list->reloc_bos =
70       vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
71                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
72 
73    if (list->reloc_bos == NULL) {
74       vk_free(alloc, list->relocs);
75       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
76    }
77 
78    if (other_list) {
79       memcpy(list->relocs, other_list->relocs,
80              list->array_length * sizeof(*list->relocs));
81       memcpy(list->reloc_bos, other_list->reloc_bos,
82              list->array_length * sizeof(*list->reloc_bos));
83    }
84 
85    return VK_SUCCESS;
86 }
87 
88 VkResult
89 anv_reloc_list_init(struct anv_reloc_list *list,
90                     const VkAllocationCallbacks *alloc)
91 {
92    return anv_reloc_list_init_clone(list, alloc, NULL);
93 }
94 
95 void
96 anv_reloc_list_finish(struct anv_reloc_list *list,
97                       const VkAllocationCallbacks *alloc)
98 {
99    vk_free(alloc, list->relocs);
100    vk_free(alloc, list->reloc_bos);
101 }
102 
103 static VkResult
104 anv_reloc_list_grow(struct anv_reloc_list *list,
105                     const VkAllocationCallbacks *alloc,
106                     size_t num_additional_relocs)
107 {
108    if (list->num_relocs + num_additional_relocs <= list->array_length)
109       return VK_SUCCESS;
110 
111    size_t new_length = list->array_length * 2;
112    while (new_length < list->num_relocs + num_additional_relocs)
113       new_length *= 2;
114 
115    struct drm_i915_gem_relocation_entry *new_relocs =
116       vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
117                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
118    if (new_relocs == NULL)
119       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
120 
121    struct anv_bo **new_reloc_bos =
122       vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
123                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
124    if (new_reloc_bos == NULL) {
125       vk_free(alloc, new_relocs);
126       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
127    }
128 
129    memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
130    memcpy(new_reloc_bos, list->reloc_bos,
131           list->num_relocs * sizeof(*list->reloc_bos));
132 
133    vk_free(alloc, list->relocs);
134    vk_free(alloc, list->reloc_bos);
135 
136    list->array_length = new_length;
137    list->relocs = new_relocs;
138    list->reloc_bos = new_reloc_bos;
139 
140    return VK_SUCCESS;
141 }
142 
143 uint64_t
144 anv_reloc_list_add(struct anv_reloc_list *list,
145                    const VkAllocationCallbacks *alloc,
146                    uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
147 {
148    struct drm_i915_gem_relocation_entry *entry;
149    int index;
150 
151    const uint32_t domain =
152       target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;
153 
154    anv_reloc_list_grow(list, alloc, 1);
155    /* TODO: Handle failure */
156 
157    /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
158    index = list->num_relocs++;
159    list->reloc_bos[index] = target_bo;
160    entry = &list->relocs[index];
161    entry->target_handle = target_bo->gem_handle;
162    entry->delta = delta;
163    entry->offset = offset;
164    entry->presumed_offset = target_bo->offset;
165    entry->read_domains = domain;
166    entry->write_domain = domain;
167    VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
168 
169    return target_bo->offset + delta;
170 }
171 
172 static void
173 anv_reloc_list_append(struct anv_reloc_list *list,
174                       const VkAllocationCallbacks *alloc,
175                       struct anv_reloc_list *other, uint32_t offset)
176 {
177    anv_reloc_list_grow(list, alloc, other->num_relocs);
178    /* TODO: Handle failure */
179 
180    memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
181           other->num_relocs * sizeof(other->relocs[0]));
182    memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
183           other->num_relocs * sizeof(other->reloc_bos[0]));
184 
185    for (uint32_t i = 0; i < other->num_relocs; i++)
186       list->relocs[i + list->num_relocs].offset += offset;
187 
188    list->num_relocs += other->num_relocs;
189 }
190 
191 /*-----------------------------------------------------------------------*
192  * Functions related to anv_batch
193  *-----------------------------------------------------------------------*/
194 
195 void *
196 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
197 {
198    if (batch->next + num_dwords * 4 > batch->end)
199       batch->extend_cb(batch, batch->user_data);
200 
201    void *p = batch->next;
202 
203    batch->next += num_dwords * 4;
204    assert(batch->next <= batch->end);
205 
206    return p;
207 }
208 
209 uint64_t
210 anv_batch_emit_reloc(struct anv_batch *batch,
211                      void *location, struct anv_bo *bo, uint32_t delta)
212 {
213    return anv_reloc_list_add(batch->relocs, batch->alloc,
214                              location - batch->start, bo, delta);
215 }
216 
217 void
218 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
219 {
220    uint32_t size, offset;
221 
222    size = other->next - other->start;
223    assert(size % 4 == 0);
224 
225    if (batch->next + size > batch->end)
226       batch->extend_cb(batch, batch->user_data);
227 
228    assert(batch->next + size <= batch->end);
229 
230    VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
231    memcpy(batch->next, other->start, size);
232 
233    offset = batch->next - batch->start;
234    anv_reloc_list_append(batch->relocs, batch->alloc,
235                          other->relocs, offset);
236 
237    batch->next += size;
238 }
239 
240 /*-----------------------------------------------------------------------*
241  * Functions related to anv_batch_bo
242  *-----------------------------------------------------------------------*/
243 
244 static VkResult
245 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
246                     struct anv_batch_bo **bbo_out)
247 {
248    VkResult result;
249 
250    struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
251                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
252    if (bbo == NULL)
253       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
254 
255    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
256                               ANV_CMD_BUFFER_BATCH_SIZE);
257    if (result != VK_SUCCESS)
258       goto fail_alloc;
259 
260    result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
261    if (result != VK_SUCCESS)
262       goto fail_bo_alloc;
263 
264    *bbo_out = bbo;
265 
266    return VK_SUCCESS;
267 
268  fail_bo_alloc:
269    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
270  fail_alloc:
271    vk_free(&cmd_buffer->pool->alloc, bbo);
272 
273    return result;
274 }
275 
276 static VkResult
277 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
278                    const struct anv_batch_bo *other_bbo,
279                    struct anv_batch_bo **bbo_out)
280 {
281    VkResult result;
282 
283    struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
284                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
285    if (bbo == NULL)
286       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
287 
288    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
289                               other_bbo->bo.size);
290    if (result != VK_SUCCESS)
291       goto fail_alloc;
292 
293    result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
294                                       &other_bbo->relocs);
295    if (result != VK_SUCCESS)
296       goto fail_bo_alloc;
297 
298    bbo->length = other_bbo->length;
299    memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
300 
301    *bbo_out = bbo;
302 
303    return VK_SUCCESS;
304 
305  fail_bo_alloc:
306    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
307  fail_alloc:
308    vk_free(&cmd_buffer->pool->alloc, bbo);
309 
310    return result;
311 }
312 
313 static void
314 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
315                    size_t batch_padding)
316 {
317    batch->next = batch->start = bbo->bo.map;
318    batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
319    batch->relocs = &bbo->relocs;
320    bbo->relocs.num_relocs = 0;
321 }
322 
323 static void
324 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
325                       size_t batch_padding)
326 {
327    batch->start = bbo->bo.map;
328    batch->next = bbo->bo.map + bbo->length;
329    batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
330    batch->relocs = &bbo->relocs;
331 }
332 
333 static void
334 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
335 {
336    assert(batch->start == bbo->bo.map);
337    bbo->length = batch->next - batch->start;
338    VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
339 }
340 
341 static VkResult
342 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
343                   struct anv_batch *batch, size_t additional,
344                   size_t batch_padding)
345 {
346    assert(batch->start == bbo->bo.map);
347    bbo->length = batch->next - batch->start;
348 
349    size_t new_size = bbo->bo.size;
350    while (new_size <= bbo->length + additional + batch_padding)
351       new_size *= 2;
352 
353    if (new_size == bbo->bo.size)
354       return VK_SUCCESS;
355 
356    struct anv_bo new_bo;
357    VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
358                                        &new_bo, new_size);
359    if (result != VK_SUCCESS)
360       return result;
361 
362    memcpy(new_bo.map, bbo->bo.map, bbo->length);
363 
364    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
365 
366    bbo->bo = new_bo;
367    anv_batch_bo_continue(bbo, batch, batch_padding);
368 
369    return VK_SUCCESS;
370 }
371 
372 static void
373 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
374                      struct anv_cmd_buffer *cmd_buffer)
375 {
376    anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
377    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
378    vk_free(&cmd_buffer->pool->alloc, bbo);
379 }
380 
381 static VkResult
382 anv_batch_bo_list_clone(const struct list_head *list,
383                         struct anv_cmd_buffer *cmd_buffer,
384                         struct list_head *new_list)
385 {
386    VkResult result = VK_SUCCESS;
387 
388    list_inithead(new_list);
389 
390    struct anv_batch_bo *prev_bbo = NULL;
391    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
392       struct anv_batch_bo *new_bbo = NULL;
393       result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
394       if (result != VK_SUCCESS)
395          break;
396       list_addtail(&new_bbo->link, new_list);
397 
398       if (prev_bbo) {
399          /* As we clone this list of batch_bo's, they chain one to the
400           * other using MI_BATCH_BUFFER_START commands.  We need to fix up
401           * those relocations as we go.  Fortunately, this is pretty easy
402           * as it will always be the last relocation in the list.
403           */
404          uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
405          assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
406          prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
407       }
408 
409       prev_bbo = new_bbo;
410    }
411 
412    if (result != VK_SUCCESS) {
413       list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
414          anv_batch_bo_destroy(bbo, cmd_buffer);
415    }
416 
417    return result;
418 }
419 
420 /*-----------------------------------------------------------------------*
421  * Functions related to anv_cmd_buffer
422  *-----------------------------------------------------------------------*/
423 
424 static inline struct anv_batch_bo *
425 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
426 {
427    return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
428 }
429 
430 struct anv_address
431 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
432 {
433    return (struct anv_address) {
434       .bo = &cmd_buffer->device->surface_state_block_pool.bo,
435       .offset = *(int32_t *)u_vector_head(&cmd_buffer->bt_blocks),
436    };
437 }
438 
439 static void
440 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
441                         struct anv_bo *bo, uint32_t offset)
442 {
443    /* In gen8+ the address field grew to two dwords to accommodate 48 bit
444     * offsets. The high 16 bits are in the last dword, so we can use the gen8
445     * version in either case, as long as we set the instruction length in the
446     * header accordingly.  This means that we always emit three dwords here
447     * and all the padding and adjustment we do in this file works for all
448     * gens.
449     */
450 
451 #define GEN7_MI_BATCH_BUFFER_START_length      2
452 #define GEN7_MI_BATCH_BUFFER_START_length_bias      2
453 
454    const uint32_t gen7_length =
455       GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
456    const uint32_t gen8_length =
457       GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
458 
459    anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
460       bbs.DWordLength               = cmd_buffer->device->info.gen < 8 ?
461                                       gen7_length : gen8_length;
462       bbs._2ndLevelBatchBuffer      = _1stlevelbatch;
463       bbs.AddressSpaceIndicator     = ASI_PPGTT;
464       bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
465    }
466 }
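/* Editor's note: a worked example of the DWordLength selection above,
 * assuming the gen8 values from genxml/gen8_pack.h (length 3, bias 2).  The
 * two-dword gen7 packet gives gen7_length = 2 - 2 = 0, while the three-dword
 * gen8 packet gives gen8_length = 3 - 2 = 1.  We always write the three-dword
 * gen8 layout; only the DWordLength in the header changes per generation.
 */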
467 
468 static void
469 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
470                              struct anv_batch_bo *bbo)
471 {
472    struct anv_batch *batch = &cmd_buffer->batch;
473    struct anv_batch_bo *current_bbo =
474       anv_cmd_buffer_current_batch_bo(cmd_buffer);
475 
476    /* We set the end of the batch a little short so we would be sure we
477     * have room for the chaining command.  Since we're about to emit the
478     * chaining command, let's set it back where it should go.
479     */
480    batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
481    assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
482 
483    emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
484 
485    anv_batch_bo_finish(current_bbo, batch);
486 }
487 
488 static VkResult
489 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
490 {
491    struct anv_cmd_buffer *cmd_buffer = _data;
492    struct anv_batch_bo *new_bbo;
493 
494    VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
495    if (result != VK_SUCCESS)
496       return result;
497 
498    struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
499    if (seen_bbo == NULL) {
500       anv_batch_bo_destroy(new_bbo, cmd_buffer);
501       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
502    }
503    *seen_bbo = new_bbo;
504 
505    cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
506 
507    list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
508 
509    anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
510 
511    return VK_SUCCESS;
512 }
513 
514 static VkResult
515 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
516 {
517    struct anv_cmd_buffer *cmd_buffer = _data;
518    struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
519 
520    anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
521                      GEN8_MI_BATCH_BUFFER_START_length * 4);
522 
523    return VK_SUCCESS;
524 }
525 
526 /** Allocate a binding table
527  *
528  * This function allocates a binding table.  This is a bit more complicated
529  * than one would think due to a combination of Vulkan driver design and some
530  * unfortunate hardware restrictions.
531  *
532  * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
533  * the binding table pointer which means that all binding tables need to live
534  * in the bottom 64k of surface state base address.  The way the GL driver has
535  * classically dealt with this restriction is to emit all surface states
536  * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
537  * isn't really an option in Vulkan for a couple of reasons:
538  *
539  *  1) In Vulkan, we have growing (or chaining) batches so surface states have
540  *     to live in their own buffer and we have to be able to re-emit
541  *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
542  *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
543  *     (it's not that hard to hit 64k of just binding tables), we allocate
544  *     surface state objects up-front when VkImageView is created.  In order
545  *     for this to work, surface state objects need to be allocated from a
546  *     global buffer.
547  *
548  *  2) We tried to design the surface state system in such a way that it's
549  *     already ready for bindless texturing.  The way bindless texturing works
550  *     on our hardware is that you have a big pool of surface state objects
551  *     (with its own state base address) and the bindless handles are simply
552  *     offsets into that pool.  With the architecture we chose, we already
553  *     have that pool and it's exactly the same pool that we use for regular
554  *     surface states so we should already be ready for bindless.
555  *
556  *  3) For render targets, we need to be able to fill out the surface states
557  *     later in vkBeginRenderPass so that we can assign clear colors
558  *     correctly.  One way to do this would be to just create the surface
559  *     state data and then repeatedly copy it into the surface state BO every
560  *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
561  *     rather annoying; it's much simpler to just allocate them up-front and
562  *     re-use them for the entire render pass.
563  *
564  * While none of these are technically blockers for emitting state on the fly
565  * like we do in GL, the ability to have a single surface state pool
566  * greatly simplifies things.  Unfortunately, it comes at a cost...
567  *
568  * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
569  * place the binding tables just anywhere in surface state base address.
570  * Because 64k isn't a whole lot of space, we can't simply restrict the
571  * surface state buffer to 64k; we have to be more clever.  The solution we've
572  * chosen is to have a block pool with a maximum size of 2G that starts at
573  * zero and grows in both directions.  All surface states are allocated from
574  * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
575  * binding tables from the bottom of the pool (negative offsets).  Every time
576  * we allocate a new binding table block, we set surface state base address to
577  * point to the bottom of the binding table block.  This way all of the
578  * binding tables in the block are in the bottom 64k of surface state base
579  * address.  When we fill out the binding table, we add the distance between
580  * the bottom of our binding table block and zero of the block pool to the
581  * surface state offsets so that they are correct relative to our new surface
582  * state base address at the bottom of the binding table block.
583  *
584  * \see adjust_relocations_from_state_pool()
585  * \see adjust_relocations_to_state_pool()
586  *
587  * \param[in]  entries        The number of surface state entries the binding
588  *                            table should be able to hold.
589  *
590  * \param[out] state_offset   The offset from surface state base address
591  *                            where the surface states live.  This must be
592  *                            added to the surface state offset when it is
593  *                            written into the binding table entry.
594  *
595  * \return                    An anv_state representing the binding table
596  */
597 struct anv_state
598 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
599                                    uint32_t entries, uint32_t *state_offset)
600 {
601    struct anv_block_pool *block_pool =
602        &cmd_buffer->device->surface_state_block_pool;
603    int32_t *bt_block = u_vector_head(&cmd_buffer->bt_blocks);
604    struct anv_state state;
605 
606    state.alloc_size = align_u32(entries * 4, 32);
607 
608    if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
609       return (struct anv_state) { 0 };
610 
611    state.offset = cmd_buffer->bt_next;
612    state.map = block_pool->map + *bt_block + state.offset;
613 
614    cmd_buffer->bt_next += state.alloc_size;
615 
616    assert(*bt_block < 0);
617    *state_offset = -(*bt_block);
618 
619    return state;
620 }
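/* Editor's note: a minimal usage sketch of the scheme described above.  The
 * helper below is hypothetical (the real callers live in the genX command
 * buffer code); it shows the expected retry pattern when the current binding
 * table block is exhausted, which anv_cmd_buffer_alloc_binding_table signals
 * by returning a zero alloc_size.
 */
static struct anv_state
example_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                            uint32_t entries, uint32_t *state_offset)
{
   struct anv_state bt =
      anv_cmd_buffer_alloc_binding_table(cmd_buffer, entries, state_offset);
   if (bt.alloc_size == 0) {
      /* The current bt_block is full: grab a fresh block from the bottom of
       * the pool (the real driver also re-emits STATE_BASE_ADDRESS here) and
       * try again.
       */
      if (anv_cmd_buffer_new_binding_table_block(cmd_buffer) != VK_SUCCESS)
         return (struct anv_state) { 0 };
      bt = anv_cmd_buffer_alloc_binding_table(cmd_buffer, entries, state_offset);
   }
   return bt;
}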
621 
622 struct anv_state
623 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
624 {
625    struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
626    return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
627                                  isl_dev->ss.size, isl_dev->ss.align);
628 }
629 
630 struct anv_state
631 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
632                                    uint32_t size, uint32_t alignment)
633 {
634    return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
635                                  size, alignment);
636 }
637 
638 VkResult
639 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
640 {
641    struct anv_block_pool *block_pool =
642        &cmd_buffer->device->surface_state_block_pool;
643 
644    int32_t *offset = u_vector_add(&cmd_buffer->bt_blocks);
645    if (offset == NULL)
646       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
647 
648    *offset = anv_block_pool_alloc_back(block_pool);
649    cmd_buffer->bt_next = 0;
650 
651    return VK_SUCCESS;
652 }
653 
654 VkResult
655 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
656 {
657    struct anv_batch_bo *batch_bo;
658    VkResult result;
659 
660    list_inithead(&cmd_buffer->batch_bos);
661 
662    result = anv_batch_bo_create(cmd_buffer, &batch_bo);
663    if (result != VK_SUCCESS)
664       return result;
665 
666    list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
667 
668    cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
669    cmd_buffer->batch.user_data = cmd_buffer;
670 
671    if (cmd_buffer->device->can_chain_batches) {
672       cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
673    } else {
674       cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
675    }
676 
677    anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
678                       GEN8_MI_BATCH_BUFFER_START_length * 4);
679 
680    int success = u_vector_init(&cmd_buffer->seen_bbos,
681                                  sizeof(struct anv_bo *),
682                                  8 * sizeof(struct anv_bo *));
683    if (!success)
684       goto fail_batch_bo;
685 
686    *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
687 
688    success = u_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
689                              8 * sizeof(int32_t));
690    if (!success)
691       goto fail_seen_bbos;
692 
693    result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
694                                 &cmd_buffer->pool->alloc);
695    if (result != VK_SUCCESS)
696       goto fail_bt_blocks;
697    cmd_buffer->last_ss_pool_center = 0;
698 
699    anv_cmd_buffer_new_binding_table_block(cmd_buffer);
700 
701    return VK_SUCCESS;
702 
703  fail_bt_blocks:
704    u_vector_finish(&cmd_buffer->bt_blocks);
705  fail_seen_bbos:
706    u_vector_finish(&cmd_buffer->seen_bbos);
707  fail_batch_bo:
708    anv_batch_bo_destroy(batch_bo, cmd_buffer);
709 
710    return result;
711 }
712 
713 void
714 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
715 {
716    int32_t *bt_block;
717    u_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
718       anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
719                           *bt_block);
720    }
721    u_vector_finish(&cmd_buffer->bt_blocks);
722 
723    anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
724 
725    u_vector_finish(&cmd_buffer->seen_bbos);
726 
727    /* Destroy all of the batch buffers */
728    list_for_each_entry_safe(struct anv_batch_bo, bbo,
729                             &cmd_buffer->batch_bos, link) {
730       anv_batch_bo_destroy(bbo, cmd_buffer);
731    }
732 }
733 
734 void
735 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
736 {
737    /* Delete all but the first batch bo */
738    assert(!list_empty(&cmd_buffer->batch_bos));
739    while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
740       struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
741       list_del(&bbo->link);
742       anv_batch_bo_destroy(bbo, cmd_buffer);
743    }
744    assert(!list_empty(&cmd_buffer->batch_bos));
745 
746    anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
747                       &cmd_buffer->batch,
748                       GEN8_MI_BATCH_BUFFER_START_length * 4);
749 
750    while (u_vector_length(&cmd_buffer->bt_blocks) > 1) {
751       int32_t *bt_block = u_vector_remove(&cmd_buffer->bt_blocks);
752       anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
753                           *bt_block);
754    }
755    assert(u_vector_length(&cmd_buffer->bt_blocks) == 1);
756    cmd_buffer->bt_next = 0;
757 
758    cmd_buffer->surface_relocs.num_relocs = 0;
759    cmd_buffer->last_ss_pool_center = 0;
760 
761    /* Reset the list of seen buffers */
762    cmd_buffer->seen_bbos.head = 0;
763    cmd_buffer->seen_bbos.tail = 0;
764 
765    *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
766       anv_cmd_buffer_current_batch_bo(cmd_buffer);
767 }
768 
769 void
770 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
771 {
772    struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
773 
774    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
775       /* When we start a batch buffer, we subtract a certain amount of
776        * padding from the end to ensure that we always have room to emit a
777        * BATCH_BUFFER_START to chain to the next BO.  We need to remove
778        * that padding before we end the batch; otherwise, we may end up
779        * with our BATCH_BUFFER_END in another BO.
780        */
781       cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
782       assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
783 
784       anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
785 
786       /* Round batch up to an even number of dwords. */
787       if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
788          anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
789 
790       cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
791    }
792 
793    anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
794 
795    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
796       /* If this is a secondary command buffer, we need to determine the
797        * mode in which it will be executed with vkExecuteCommands.  We
798        * determine this statically here so that this stays in sync with the
799        * actual ExecuteCommands implementation.
800        */
801       if (!cmd_buffer->device->can_chain_batches) {
802          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
803       } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
804           (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
805          /* If the secondary has exactly one batch buffer in its list *and*
806           * that batch buffer is less than half of the maximum size, we're
806           * probably better off simply copying it into our batch.
808           */
809          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
810       } else if (!(cmd_buffer->usage_flags &
811                    VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
812          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
813 
814          /* When we chain, we need to add an MI_BATCH_BUFFER_START command
815           * with its relocation.  In order to handle this we'll increment here
816           * so we can unconditionally decrement right before adding the
817           * MI_BATCH_BUFFER_START command.
818           */
819          batch_bo->relocs.num_relocs++;
820          cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
821       } else {
822          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
823       }
824    }
825 }
826 
827 static inline VkResult
828 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
829                              struct list_head *list)
830 {
831    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
832       struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
833       if (bbo_ptr == NULL)
834          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
835 
836       *bbo_ptr = bbo;
837    }
838 
839    return VK_SUCCESS;
840 }
841 
842 void
843 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
844                              struct anv_cmd_buffer *secondary)
845 {
846    switch (secondary->exec_mode) {
847    case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
848       anv_batch_emit_batch(&primary->batch, &secondary->batch);
849       break;
850    case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
851       struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
852       unsigned length = secondary->batch.end - secondary->batch.start;
853       anv_batch_bo_grow(primary, bbo, &primary->batch, length,
854                         GEN8_MI_BATCH_BUFFER_START_length * 4);
855       anv_batch_emit_batch(&primary->batch, &secondary->batch);
856       break;
857    }
858    case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
859       struct anv_batch_bo *first_bbo =
860          list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
861       struct anv_batch_bo *last_bbo =
862          list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
863 
864       emit_batch_buffer_start(primary, &first_bbo->bo, 0);
865 
866       struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
867       assert(primary->batch.start == this_bbo->bo.map);
868       uint32_t offset = primary->batch.next - primary->batch.start;
869       const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;
870 
871       /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
872        * can emit a new command and relocation for the current splice.  In
873        * order to handle the initial-use case, we incremented next and
874           * num_relocs in end_batch_buffer() so we can always just subtract
875        * here.
876        */
877       last_bbo->relocs.num_relocs--;
878       secondary->batch.next -= inst_size;
879       emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
880       anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
881 
882       /* After patching up the secondary buffer, we need to clflush the
883        * modified instruction in case we're on a !llc platform. We use a
884        * little loop to handle the case where the instruction crosses a cache
885        * line boundary.
886        */
887       if (!primary->device->info.has_llc) {
888          void *inst = secondary->batch.next - inst_size;
889          void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
890          __builtin_ia32_mfence();
891          while (p < secondary->batch.next) {
892             __builtin_ia32_clflush(p);
893             p += CACHELINE_SIZE;
894          }
895       }
896       break;
897    }
898    case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
899       struct list_head copy_list;
900       VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
901                                                 secondary,
902                                                 &copy_list);
903       if (result != VK_SUCCESS)
904          return; /* FIXME */
905 
906       anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
907 
908       struct anv_batch_bo *first_bbo =
909          list_first_entry(&copy_list, struct anv_batch_bo, link);
910       struct anv_batch_bo *last_bbo =
911          list_last_entry(&copy_list, struct anv_batch_bo, link);
912 
913       cmd_buffer_chain_to_batch_bo(primary, first_bbo);
914 
915       list_splicetail(&copy_list, &primary->batch_bos);
916 
917       anv_batch_bo_continue(last_bbo, &primary->batch,
918                             GEN8_MI_BATCH_BUFFER_START_length * 4);
919       break;
920    }
921    default:
922       assert(!"Invalid execution mode");
923    }
924 
925    anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
926                          &secondary->surface_relocs, 0);
927 }
928 
929 struct anv_execbuf {
930    struct drm_i915_gem_execbuffer2           execbuf;
931 
932    struct drm_i915_gem_exec_object2 *        objects;
933    uint32_t                                  bo_count;
934    struct anv_bo **                          bos;
935 
936    /* Allocated length of the 'objects' and 'bos' arrays */
937    uint32_t                                  array_length;
938 };
939 
940 static void
941 anv_execbuf_init(struct anv_execbuf *exec)
942 {
943    memset(exec, 0, sizeof(*exec));
944 }
945 
946 static void
947 anv_execbuf_finish(struct anv_execbuf *exec,
948                    const VkAllocationCallbacks *alloc)
949 {
950    vk_free(alloc, exec->objects);
951    vk_free(alloc, exec->bos);
952 }
953 
954 static VkResult
955 anv_execbuf_add_bo(struct anv_execbuf *exec,
956                    struct anv_bo *bo,
957                    struct anv_reloc_list *relocs,
958                    const VkAllocationCallbacks *alloc)
959 {
960    struct drm_i915_gem_exec_object2 *obj = NULL;
961 
962    if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
963       obj = &exec->objects[bo->index];
964 
965    if (obj == NULL) {
966       /* We've never seen this one before.  Add it to the list and assign
967        * an id that we can use later.
968        */
969       if (exec->bo_count >= exec->array_length) {
970          uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
971 
972          struct drm_i915_gem_exec_object2 *new_objects =
973             vk_alloc(alloc, new_len * sizeof(*new_objects),
974                      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
975          if (new_objects == NULL)
976             return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
977 
978          struct anv_bo **new_bos =
979             vk_alloc(alloc, new_len * sizeof(*new_bos),
980                       8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
981          if (new_bos == NULL) {
982             vk_free(alloc, new_objects);
983             return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
984          }
985 
986          if (exec->objects) {
987             memcpy(new_objects, exec->objects,
988                    exec->bo_count * sizeof(*new_objects));
989             memcpy(new_bos, exec->bos,
990                    exec->bo_count * sizeof(*new_bos));
991          }
992 
993          vk_free(alloc, exec->objects);
994          vk_free(alloc, exec->bos);
995 
996          exec->objects = new_objects;
997          exec->bos = new_bos;
998          exec->array_length = new_len;
999       }
1000 
1001       assert(exec->bo_count < exec->array_length);
1002 
1003       bo->index = exec->bo_count++;
1004       obj = &exec->objects[bo->index];
1005       exec->bos[bo->index] = bo;
1006 
1007       obj->handle = bo->gem_handle;
1008       obj->relocation_count = 0;
1009       obj->relocs_ptr = 0;
1010       obj->alignment = 0;
1011       obj->offset = bo->offset;
1012       obj->flags = bo->is_winsys_bo ? EXEC_OBJECT_WRITE : 0;
1013       obj->rsvd1 = 0;
1014       obj->rsvd2 = 0;
1015    }
1016 
1017    if (relocs != NULL && obj->relocation_count == 0) {
1018       /* This is the first time we've ever seen a list of relocations for
1019        * this BO.  Go ahead and set the relocations and then walk the list
1020        * of relocations and add them all.
1021        */
1022       obj->relocation_count = relocs->num_relocs;
1023       obj->relocs_ptr = (uintptr_t) relocs->relocs;
1024 
1025       for (size_t i = 0; i < relocs->num_relocs; i++) {
1026          /* A quick sanity check on relocations */
1027          assert(relocs->relocs[i].offset < bo->size);
1028          anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL, alloc);
1029       }
1030    }
1031 
1032    return VK_SUCCESS;
1033 }
1034 
1035 static void
1036 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1037                               struct anv_reloc_list *list)
1038 {
1039    for (size_t i = 0; i < list->num_relocs; i++)
1040       list->relocs[i].target_handle = list->reloc_bos[i]->index;
1041 }
1042 
1043 static void
1044 write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
1045 {
1046    unsigned reloc_size = 0;
1047    if (device->info.gen >= 8) {
1048       /* From the Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress:
1049        *
1050        *    "This field specifies the address of the memory location where the
1051        *    register value specified in the DWord above will read from. The
1052        *    address specifies the DWord location of the data. Range =
1053        *    GraphicsVirtualAddress[63:2] for a DWord register GraphicsAddress
1054        *    [63:48] are ignored by the HW and assumed to be in correct
1055        *    canonical form [63:48] == [47]."
1056        */
1057       const int shift = 63 - 47;
1058       reloc_size = sizeof(uint64_t);
1059       *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
1060    } else {
1061       reloc_size = sizeof(uint32_t);
1062       *(uint32_t *)p = v;
1063    }
1064 
1065    if (flush && !device->info.has_llc)
1066       anv_clflush_range(p, reloc_size);
1067 }
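/* Editor's note: a worked example of the sign-extension above (the helper is
 * hypothetical, for illustration only).  For a 48-bit graphics address, bits
 * [63:48] must replicate bit 47 to be in canonical form; shifting left by 16
 * and arithmetically shifting back achieves exactly that.
 */
static inline uint64_t
example_canonical_address(uint64_t addr)
{
   const int shift = 63 - 47;   /* 16 */
   /* e.g. 0x0000800000000000 (bit 47 set) becomes 0xffff800000000000 */
   return (uint64_t)(((int64_t)addr << shift) >> shift);
}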
1068 
1069 static void
1070 adjust_relocations_from_state_pool(struct anv_block_pool *pool,
1071                                    struct anv_reloc_list *relocs,
1072                                    uint32_t last_pool_center_bo_offset)
1073 {
1074    assert(last_pool_center_bo_offset <= pool->center_bo_offset);
1075    uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
1076 
1077    for (size_t i = 0; i < relocs->num_relocs; i++) {
1078       /* All of the relocations from this block pool to other BO's should
1079        * have been emitted relative to the surface block pool center.  We
1080        * need to add the center offset to make them relative to the
1081        * beginning of the actual GEM bo.
1082        */
1083       relocs->relocs[i].offset += delta;
1084    }
1085 }
1086 
1087 static void
1088 adjust_relocations_to_state_pool(struct anv_block_pool *pool,
1089                                  struct anv_bo *from_bo,
1090                                  struct anv_reloc_list *relocs,
1091                                  uint32_t last_pool_center_bo_offset)
1092 {
1093    assert(last_pool_center_bo_offset <= pool->center_bo_offset);
1094    uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
1095 
1096    /* When we initially emit relocations into a block pool, we don't
1097     * actually know what the final center_bo_offset will be so we just emit
1098     * it as if center_bo_offset == 0.  Now that we know what the center
1099     * offset is, we need to walk the list of relocations and adjust any
1100     * relocations that point to the pool bo with the correct offset.
1101     */
1102    for (size_t i = 0; i < relocs->num_relocs; i++) {
1103       if (relocs->reloc_bos[i] == &pool->bo) {
1104          /* Adjust the delta value in the relocation to correctly
1105           * correspond to the new delta.  Initially, this value may have
1106           * been negative (if treated as unsigned), but we trust in
1107           * uint32_t roll-over to fix that for us at this point.
1108           */
1109          relocs->relocs[i].delta += delta;
1110 
1111          /* Since the delta has changed, we need to update the actual
1112           * relocated value with the new presumed value.  This function
1113           * should only be called on batch buffers, so we know it isn't in
1114           * use by the GPU at the moment.
1115           */
1116          assert(relocs->relocs[i].offset < from_bo->size);
1117          write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
1118                      relocs->relocs[i].presumed_offset +
1119                      relocs->relocs[i].delta, false);
1120       }
1121    }
1122 }
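/* Editor's note: a worked example of the two adjustments above, with made-up
 * numbers.  Suppose the block pool re-centered so that center_bo_offset grew
 * from 4096 to 8192 (delta = 4096).  Everything in the pool BO slid forward
 * by 4096 bytes within the GEM BO, so a relocation *located* in the pool BO
 * (the surface_relocs) gets offset += 4096, and a relocation in a batch BO
 * *targeting* the pool BO gets delta += 4096 so that presumed_offset + delta
 * still names the same surface state.
 */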
1123 
1124 static void
1125 anv_reloc_list_apply(struct anv_device *device,
1126                      struct anv_reloc_list *list,
1127                      struct anv_bo *bo,
1128                      bool always_relocate)
1129 {
1130    for (size_t i = 0; i < list->num_relocs; i++) {
1131       struct anv_bo *target_bo = list->reloc_bos[i];
1132       if (list->relocs[i].presumed_offset == target_bo->offset &&
1133           !always_relocate)
1134          continue;
1135 
1136       void *p = bo->map + list->relocs[i].offset;
1137       write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1138       list->relocs[i].presumed_offset = target_bo->offset;
1139    }
1140 }
1141 
1142 /**
1143  * This function applies the relocation for a command buffer and writes the
1144  * actual addresses into the buffers as per what we were told by the kernel on
1145  * the previous execbuf2 call.  This should be safe to do because, for each
1146  * relocated address, we have two cases:
1147  *
1148  *  1) The target BO is inactive (as seen by the kernel).  In this case, it is
1149  *     not in use by the GPU so updating the address is 100% ok.  It won't be
1150  *     in-use by the GPU (from our context) again until the next execbuf2
1151  *     happens.  If the kernel decides to move it in the next execbuf2, it
1152  *     will have to do the relocations itself, but that's ok because it should
1153  *     have all of the information needed to do so.
1154  *
1155  *  2) The target BO is active (as seen by the kernel).  In this case, it
1156  *     hasn't moved since the last execbuffer2 call because GTT shuffling
1157  *     *only* happens when the BO is idle. (From our perspective, it only
1158  *     happens inside the execbuffer2 ioctl, but the shuffling may be
1159  *     triggered by another ioctl, with full-ppgtt this is limited to only
1160  *     execbuffer2 ioctls on the same context, or memory pressure.)  Since the
1161  *     target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1162  *     address and the relocated value we are writing into the BO will be the
1163  *     same as the value that is already there.
1164  *
1165  *     There is also a possibility that the target BO is active but the exact
1166  *     RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1167  *     use.  In this case, the address currently in the RENDER_SURFACE_STATE
1168  *     may be stale but it's still safe to write the relocation because that
1169  *     particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1170  *     won't be until the next execbuf2 call.
1171  *
1172  * By doing relocations on the CPU, we can tell the kernel that it doesn't
1173  * need to bother.  We want to do this because the surface state buffer is
1174  * used by every command buffer so, if the kernel does the relocations, it
1175  * will always be busy and the kernel will always stall.  This is also
1176  * probably the fastest mechanism for doing relocations since the kernel would
1177  * have to make a full copy of all the relocations lists.
1178  */
1179 static bool
1180 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1181                     struct anv_execbuf *exec)
1182 {
1183    static int userspace_relocs = -1;
1184    if (userspace_relocs < 0)
1185       userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1186    if (!userspace_relocs)
1187       return false;
1188 
1189    /* First, we have to check to see whether or not we can even do the
1190     * relocation.  New buffers which have never been submitted to the kernel
1191     * don't have a valid offset so we need to let the kernel do relocations so
1192     * that we can get offsets for them.  On future execbuf2 calls, those
1193     * buffers will have offsets and we will be able to skip relocating.
1194     * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1195     */
1196    for (uint32_t i = 0; i < exec->bo_count; i++) {
1197       if (exec->bos[i]->offset == (uint64_t)-1)
1198          return false;
1199    }
1200 
1201    /* Since surface states are shared between command buffers and we don't
1202     * know what order they will be submitted to the kernel, we don't know
1203     * what address is actually written in the surface state object at any
1204     * given time.  The only option is to always relocate them.
1205     */
1206    anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1207                         &cmd_buffer->device->surface_state_block_pool.bo,
1208                         true /* always relocate surface states */);
1209 
1210    /* Since we own all of the batch buffers, we know what values are stored
1211     * in the relocated addresses and only have to update them if the offsets
1212     * have changed.
1213     */
1214    struct anv_batch_bo **bbo;
1215    u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1216       anv_reloc_list_apply(cmd_buffer->device,
1217                            &(*bbo)->relocs, &(*bbo)->bo, false);
1218    }
1219 
1220    for (uint32_t i = 0; i < exec->bo_count; i++)
1221       exec->objects[i].offset = exec->bos[i]->offset;
1222 
1223    return true;
1224 }
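/* Editor's note: a minimal sketch (hypothetical helper, not driver code) of
 * the invariant that relocate_cmd_buffer() establishes and that the NO_RELOC
 * path below relies on: every validation-list entry's offset matches where we
 * believe its BO currently lives, and every relocation's presumed_offset
 * matches its target BO's offset.
 */
static void
example_check_no_reloc_invariants(const struct anv_execbuf *exec,
                                  const struct anv_reloc_list *relocs)
{
   for (uint32_t i = 0; i < exec->bo_count; i++)
      assert(exec->objects[i].offset == exec->bos[i]->offset);

   for (size_t i = 0; i < relocs->num_relocs; i++)
      assert(relocs->relocs[i].presumed_offset == relocs->reloc_bos[i]->offset);
}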
1225 
1226 VkResult
1227 anv_cmd_buffer_execbuf(struct anv_device *device,
1228                        struct anv_cmd_buffer *cmd_buffer)
1229 {
1230    struct anv_batch *batch = &cmd_buffer->batch;
1231    struct anv_block_pool *ss_pool =
1232       &cmd_buffer->device->surface_state_block_pool;
1233 
1234    struct anv_execbuf execbuf;
1235    anv_execbuf_init(&execbuf);
1236 
1237    adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1238                                       cmd_buffer->last_ss_pool_center);
1239    anv_execbuf_add_bo(&execbuf, &ss_pool->bo, &cmd_buffer->surface_relocs,
1240                       &cmd_buffer->pool->alloc);
1241 
1242    /* First, we walk over all of the bos we've seen and add them and their
1243     * relocations to the validate list.
1244     */
1245    struct anv_batch_bo **bbo;
1246    u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1247       adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1248                                        cmd_buffer->last_ss_pool_center);
1249 
1250       anv_execbuf_add_bo(&execbuf, &(*bbo)->bo, &(*bbo)->relocs,
1251                          &cmd_buffer->pool->alloc);
1252    }
1253 
1254    /* Now that we've adjusted all of the surface state relocations, we need to
1255     * record the surface state pool center so future executions of the command
1256     * buffer can adjust correctly.
1257     */
1258    cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;
1259 
1260    struct anv_batch_bo *first_batch_bo =
1261       list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1262 
1263    /* The kernel requires that the last entry in the validation list be the
1264     * batch buffer to execute.  We can simply swap the element
1265     * corresponding to the first batch_bo in the chain with the last
1266     * element in the list.
1267     */
1268    if (first_batch_bo->bo.index != execbuf.bo_count - 1) {
1269       uint32_t idx = first_batch_bo->bo.index;
1270       uint32_t last_idx = execbuf.bo_count - 1;
1271 
1272       struct drm_i915_gem_exec_object2 tmp_obj = execbuf.objects[idx];
1273       assert(execbuf.bos[idx] == &first_batch_bo->bo);
1274 
1275       execbuf.objects[idx] = execbuf.objects[last_idx];
1276       execbuf.bos[idx] = execbuf.bos[last_idx];
1277       execbuf.bos[idx]->index = idx;
1278 
1279       execbuf.objects[last_idx] = tmp_obj;
1280       execbuf.bos[last_idx] = &first_batch_bo->bo;
1281       first_batch_bo->bo.index = last_idx;
1282    }
1283 
1284    /* Now we go through and fixup all of the relocation lists to point to
1285     * the correct indices in the object array.  We have to do this after we
1286     * reorder the list above as some of the indices may have changed.
1287     */
1288    u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1289       anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1290 
1291    anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1292 
1293    if (!cmd_buffer->device->info.has_llc) {
1294       __builtin_ia32_mfence();
1295       u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1296          for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1297             __builtin_ia32_clflush((*bbo)->bo.map + i);
1298       }
1299    }
1300 
1301    execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
1302       .buffers_ptr = (uintptr_t) execbuf.objects,
1303       .buffer_count = execbuf.bo_count,
1304       .batch_start_offset = 0,
1305       .batch_len = batch->next - batch->start,
1306       .cliprects_ptr = 0,
1307       .num_cliprects = 0,
1308       .DR1 = 0,
1309       .DR4 = 0,
1310       .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
1311                I915_EXEC_CONSTANTS_REL_GENERAL,
1312       .rsvd1 = cmd_buffer->device->context_id,
1313       .rsvd2 = 0,
1314    };
1315 
1316    if (relocate_cmd_buffer(cmd_buffer, &execbuf)) {
1317       /* If we were able to successfully relocate everything, tell the kernel
1318        * that it can skip doing relocations. The requirement for using
1319        * NO_RELOC is:
1320        *
1321        *  1) The addresses written in the objects must match the corresponding
1322        *     reloc.presumed_offset which in turn must match the corresponding
1323        *     execobject.offset.
1324        *
1325        *  2) To avoid stalling, execobject.offset should match the current
1326        *     address of that object within the active context.
1327        *
1328        * In order to satisfy all of the invariants that make userspace
1329        * relocations to be safe (see relocate_cmd_buffer()), we need to
1330        * further ensure that the addresses we use match those used by the
1331        * kernel for the most recent execbuf2.
1332        *
1333        * The kernel may still choose to do relocations anyway if something has
1334        * moved in the GTT. In this case, the relocation list still needs to be
1335        * valid.  All relocations on the batch buffers are already valid and
1336        * kept up-to-date.  For surface state relocations, by applying the
1337        * relocations in relocate_cmd_buffer, we ensured that the address in
1338        * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1339        * safe for the kernel to relocate them as needed.
1340        */
1341       execbuf.execbuf.flags |= I915_EXEC_NO_RELOC;
1342    } else {
1343       /* In the case where we fall back to doing kernel relocations, we need
1344        * to ensure that the relocation list is valid.  All relocations on the
1345        * batch buffers are already valid and kept up-to-date.  Since surface
1346        * states are shared between command buffers and we don't know what
1347        * order they will be submitted to the kernel, we don't know what
1348        * address is actually written in the surface state object at any given
1349        * time.  The only option is to set a bogus presumed offset and let the
1350        * kernel relocate them.
1351        */
1352       for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1353          cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1354    }
1355 
1356    VkResult result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1357 
1358    anv_execbuf_finish(&execbuf, &cmd_buffer->pool->alloc);
1359 
1360    return result;
1361 }
1362