/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "anv_private.h"

/* These are defined in anv_private.h and blorp_genX_exec.h */
#undef __gen_address_type
#undef __gen_user_data
#undef __gen_combine_address

#include "common/gen_l3_config.h"
#include "common/gen_sample_positions.h"
#include "blorp/blorp_genX_exec.h"

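/* BLORP hook: allocate space for 'n' dwords in the command buffer's batch and
 * return a pointer for BLORP to pack its commands into.
 */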
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return anv_batch_emit_dwords(&cmd_buffer->batch, n);
}

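/* BLORP hook: record a relocation for an address that BLORP has written at
 * 'location' within the batch and return the 64-bit value to emit there.
 */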
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   assert(cmd_buffer->batch.start <= location &&
          location < cmd_buffer->batch.end);
   return anv_batch_emit_reloc(&cmd_buffer->batch, location,
                               address.buffer, address.offset + delta);
}

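/* BLORP hook: add a relocation for a surface state at 'ss_offset' in the
 * surface state pool and write the resolved address into the mapped state.
 * Allocation failures are recorded on the batch instead of being returned.
 */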
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   uint64_t address_u64 = 0;
   VkResult result =
      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                         ss_offset, address.buffer, address.offset + delta,
                         &address_u64);
   if (result != VK_SUCCESS)
      anv_batch_set_error(&cmd_buffer->batch, result);

   void *dest = anv_block_pool_map(
      &cmd_buffer->device->surface_state_pool.block_pool, ss_offset, 8);
   write_reloc(cmd_buffer->device, dest, address_u64, false);
}

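/* BLORP hook: return the address to write into a surface state.  anv fills
 * the address in via blorp_surface_reloc() instead, so this returns 0.
 */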
static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address address)
{
   /* We'll let blorp_surface_reloc write the address. */
   return 0ull;
}

#if GEN_GEN >= 7 && GEN_GEN < 10
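/* BLORP hook (gen7 through gen9 only): return the base address of the
 * device's surface state pool.
 */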
static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return (struct blorp_address) {
      .buffer = cmd_buffer->device->surface_state_pool.block_pool.bo,
      .offset = 0,
   };
}
#endif

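/* BLORP hook: allocate 'size' bytes of dynamic state with the requested
 * alignment from the command buffer's dynamic state stream.  Returns the
 * CPU map and stores the state offset in '*offset'.
 */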
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);

   *offset = state.offset;
   return state.map;
}

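/* BLORP hook: allocate a binding table with 'num_entries' entries plus a
 * fresh surface state for each entry.  The binding table offset and the
 * per-entry surface state offsets and maps are returned through the out
 * parameters.
 */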
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets, void **surface_maps)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, num_entries,
                                               &state_offset, &bt_state);
   if (result != VK_SUCCESS)
      return;

   uint32_t *bt_map = bt_state.map;
   *bt_offset = bt_state.offset;

   for (unsigned i = 0; i < num_entries; i++) {
      struct anv_state surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);
      bt_map[i] = surface_state.offset + state_offset;
      surface_offsets[i] = surface_state.offset;
      surface_maps[i] = surface_state.map;
   }
}

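/* BLORP hook: allocate 'size' bytes of vertex data from the dynamic state
 * stream (64-byte aligned) and return the CPU map, with the GPU address
 * returned through '*addr'.
 */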
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   struct anv_state vb_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 64);

   *addr = (struct blorp_address) {
      .buffer = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
      .offset = vb_state.offset,
      .mocs = isl_mocs(&cmd_buffer->device->isl_dev,
                       ISL_SURF_USAGE_VERTEX_BUFFER_BIT),
   };

   return vb_state.map;
}

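/* BLORP hook: record the vertex buffers BLORP is about to bind so the gen8+
 * VF cache workaround for 48-bit address transitions can emit any required
 * invalidations, then apply pending pipe flushes and mark those VBs dirty.
 */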
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
                                           const struct blorp_address *addrs,
                                           uint32_t *sizes,
                                           unsigned num_vbs)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct anv_address anv_addr = {
         .bo = addrs[i].buffer,
         .offset = addrs[i].offset,
      };
      genX(cmd_buffer_set_binding_for_gen8_vb_flush)(cmd_buffer,
                                                     i, anv_addr, sizes[i]);
   }

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   /* Technically, we should call this *after* 3DPRIMITIVE, but it doesn't
    * really matter for blorp because we never call apply_pipe_flushes after
    * this point.
    */
   genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)(cmd_buffer, SEQUENTIAL,
                                                       (1 << num_vbs) - 1);
}

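/* BLORP hook: return the address of the device's workaround buffer, a small
 * BO used as a dummy destination for workaround writes.
 */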
UNUSED static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = cmd_buffer->device->workaround_address.bo,
      .offset = cmd_buffer->device->workaround_address.offset,
   };
}

static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   /* We don't need to flush states anymore, since everything will be snooped.
    */
}

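/* BLORP hook: return the L3 configuration currently programmed on the command
 * buffer.
 */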
static const struct gen_l3_config *
blorp_get_l3_config(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return cmd_buffer->state.current_l3_config;
}

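/* Top-level entry point: bring the command buffer into the state BLORP
 * expects (L3 config, hashing mode, pipeline select, plus the required
 * flushes and workarounds), run the BLORP operation, and then dirty all
 * tracked 3D state since BLORP has clobbered it.
 */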
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   if (!cmd_buffer->state.current_l3_config) {
      const struct gen_l3_config *cfg =
         gen_get_default_l3_config(&cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, params->x1 - params->x0,
                                      params->y1 - params->y0, scale);

#if GEN_GEN >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit. When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
      ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
#endif

#if GEN_GEN == 7
   /* The MI_LOAD/STORE_REGISTER_MEM commands which BLORP uses to implement
    * indirect fast-clear colors can cause GPU hangs if we don't stall first.
    * See genX(cmd_buffer_mi_memcpy) for more details.
    */
   if (params->src.clear_color_addr.buffer ||
       params->dst.clear_color_addr.buffer)
      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
#endif

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);

   /* BLORP doesn't do anything fancy with depth such as discards, so we want
    * the PMA fix off. Also, off is always the safe option.
    */
   genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);

   blorp_exec(batch, params);

#if GEN_GEN >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit. When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
      ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
#endif

   cmd_buffer->state.gfx.vb_dirty = ~0;
   cmd_buffer->state.gfx.dirty = ~0;
   cmd_buffer->state.push_constants_dirty = ~0;
}