/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "anv_private.h"

/* These are defined in anv_private.h and blorp_genX_exec.h */
#undef __gen_address_type
#undef __gen_user_data
#undef __gen_combine_address

#include "common/gen_l3_config.h"
#include "common/gen_sample_positions.h"
#include "blorp/blorp_genX_exec.h"

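/* blorp_genX_exec.h declares the driver hooks below as static functions and
 * implements blorp_exec() in terms of them.  The definitions here translate
 * BLORP's requests for batch space, relocations, and transient state into
 * the corresponding anv batch and state-pool operations.
 */
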
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return anv_batch_emit_dwords(&cmd_buffer->batch, n);
}

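/* Record a relocation for a batch field that points into a BO.  The delta is
 * added to the BO offset, and the value returned here is what the caller
 * writes into the batch at 'location'.
 */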
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(cmd_buffer->batch.start <= location &&
          location < cmd_buffer->batch.end);
   return anv_batch_emit_reloc(&cmd_buffer->batch, location,
                               address.buffer, address.offset + delta);
}

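/* Same idea as blorp_emit_reloc(), but for a pointer embedded in a surface
 * state; the relocation goes on the command buffer's surface relocation list.
 */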
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   VkResult result =
      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                         ss_offset, address.buffer, address.offset + delta);
   if (result != VK_SUCCESS)
      anv_batch_set_error(&cmd_buffer->batch, result);
}

static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return (struct blorp_address) {
      .buffer = &cmd_buffer->device->surface_state_pool.block_pool.bo,
      .offset = 0,
   };
}

static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);

   *offset = state.offset;
   return state.map;
}

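/* Allocate a binding table plus one surface state per entry.  The allocator
 * hands back a state_offset that must be added to each surface-state offset
 * when it is written into the binding table.
 */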
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets, void **surface_maps)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, num_entries,
                                               &state_offset, &bt_state);
   if (result != VK_SUCCESS)
      return;

   uint32_t *bt_map = bt_state.map;
   *bt_offset = bt_state.offset;

   for (unsigned i = 0; i < num_entries; i++) {
      struct anv_state surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);
      bt_map[i] = surface_state.offset + state_offset;
      surface_offsets[i] = surface_state.offset;
      surface_maps[i] = surface_state.map;
   }

   anv_state_flush(cmd_buffer->device, bt_state);
}

static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
    *
    *    "The VF cache needs to be invalidated before binding and then using
    *    Vertex Buffers that overlap with any previously bound Vertex Buffer
    *    (at a 64B granularity) since the last invalidation. A VF cache
    *    invalidate is performed by setting the "VF Cache Invalidation Enable"
    *    bit in PIPE_CONTROL."
    *
    * This restriction first appears in the Skylake PRM but the internal docs
    * also list it as being an issue on Broadwell. In order to avoid this
    * problem, we align all vertex buffer allocations to 64 bytes.
    */
   struct anv_state vb_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 64);

   *addr = (struct blorp_address) {
      .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
      .offset = vb_state.offset,
      .mocs = cmd_buffer->device->default_mocs,
   };

   return vb_state.map;
}

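/* On gen8+, BLORP needs a scratch address it can safely write to for certain
 * workarounds; point it at the device's dedicated workaround BO.
 */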
#if GEN_GEN >= 8
static struct blorp_address
blorp_get_workaround_page(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = &cmd_buffer->device->workaround_bo,
   };
}
#endif

static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   struct anv_device *device = batch->blorp->driver_ctx;
   if (!device->info.has_llc)
      gen_flush_range(start, size);
}

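/* BLORP only runs VS and FS, so only the VS URB entry size matters here; the
 * other URB stages get an entry size of 1 and sf_entry_size is expected to
 * be zero.
 */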
static void
blorp_emit_urb_config(struct blorp_batch *batch,
                      unsigned vs_entry_size, unsigned sf_entry_size)
{
   struct anv_device *device = batch->blorp->driver_ctx;
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   assert(sf_entry_size == 0);

   const unsigned entry_size[4] = { vs_entry_size, 1, 1, 1 };

   genX(emit_urb_setup)(device, &cmd_buffer->batch,
                        cmd_buffer->state.current_l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT |
                        VK_SHADER_STAGE_FRAGMENT_BIT,
                        entry_size);
}

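/* Entry point from anv: bring the command buffer into a state BLORP can
 * execute in (L3 config, 3D pipeline, pending flushes), run the operation,
 * then mark everything BLORP may have clobbered as dirty so the next real
 * draw re-emits it.
 */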
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   if (!cmd_buffer->state.current_l3_config) {
      const struct gen_l3_config *cfg =
         gen_get_default_l3_config(&cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);

   /* BLORP doesn't do anything fancy with depth such as discards, so we want
    * the PMA fix off. Also, off is always the safe option.
    */
   genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);

   /* Disable VF statistics */
   blorp_emit(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = false;
   }

   blorp_exec(batch, params);

   cmd_buffer->state.gfx.vb_dirty = ~0;
   cmd_buffer->state.gfx.dirty = ~0;
   cmd_buffer->state.push_constants_dirty = ~0;
}
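
/* Illustrative sketch only (the real plumbing lives in anv_blorp.c): a typical
 * BLORP operation is driven roughly like this, using blorp_batch_init(),
 * blorp_copy(), and blorp_batch_finish() from blorp.h.  The blorp_surf setup
 * and the src/dst coordinates are placeholders here.
 *
 *    struct blorp_batch batch;
 *    blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
 *    blorp_copy(&batch, &src_surf, src_level, src_layer,
 *               &dst_surf, dst_level, dst_layer,
 *               src_x, src_y, dst_x, dst_y, width, height);
 *    blorp_batch_finish(&batch);
 *
 * The blorp_copy() call reaches genX(blorp_exec) above through the blorp
 * context's exec hook, which then emits its state via the callbacks defined
 * in this file.
 */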