/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/gen_l3_config.h"

/**
 * This file implements some lightweight memcpy/memset operations on the GPU
 * using a vertex buffer and streamout.
 */
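
/* Typical usage (illustrative sketch only; dst_buffer, src_buffer, and the
 * offsets below are hypothetical caller-side values, not names from this
 * file):
 *
 *    genX(cmd_buffer_so_memcpy)(cmd_buffer,
 *                               anv_address_add(dst_buffer->address, dst_offset),
 *                               anv_address_add(src_buffer->address, src_offset),
 *                               num_bytes);
 *
 * num_bytes must be a multiple of 4; a size of 0 is a no-op.
 */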

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
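/* For example, gcd_pow2_u64(16, 24) == 8: ffsll() - 1 gives the index of the
 * lowest set bit (4 for 16, 3 for 24), and 1 << MIN2(4, 3) == 8.
 */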
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
    * case MIN2() will take the other one.  If both are 0, we will hit the
    * assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}

void
genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, struct anv_address src,
                           uint32_t size)
{
   if (size == 0)
      return;

   /* The maximum copy block size is 4 32-bit components at a time. */
   assert(size % 4 == 0);
   unsigned bs = gcd_pow2_u64(16, size);

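   /* Pick a vertex format whose size matches the block size, so that each
    * "vertex" the VF fetches is exactly one copy block.
    */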
   enum isl_format format;
   switch (bs) {
   case 4:  format = ISL_FORMAT_R32_UINT;          break;
   case 8:  format = ISL_FORMAT_R32G32_UINT;       break;
   case 16: format = ISL_FORMAT_R32G32B32A32_UINT; break;
   default:
      unreachable("Invalid size");
   }

   if (!cmd_buffer->state.current_l3_config) {
      const struct gen_l3_config *cfg =
         gen_get_default_l3_config(&cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

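   /* Record the source range for the Gen8+ vertex-buffer cache flushing
    * workaround, then apply any pending pipe flushes before emitting 3D state.
    */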
   genX(cmd_buffer_set_binding_for_gen8_vb_flush)(cmd_buffer, 32, src, size);
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);

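   /* Source side of the copy: bind src as vertex buffer 32 so the vertex
    * fetcher streams the data in bs-byte blocks.
    */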
   uint32_t *dw;
   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_VERTEX_BUFFERS));
   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferStartingAddress = src,
         .BufferPitch = bs,
         .MOCS = anv_mocs(cmd_buffer->device, src.bo, 0),
#if (GEN_GEN >= 8)
         .BufferSize = size,
#else
         .EndAddress = anv_address_add(src, size - 1),
#endif
      });

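   /* A single vertex element forwards the fetched block to the VUE; any
    * components beyond the block size are stored as zero.
    */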
   dw = anv_batch_emitn(&cmd_buffer->batch, 3, GENX(3DSTATE_VERTEX_ELEMENTS));
   GENX(VERTEX_ELEMENT_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex = 32,
         .Valid = true,
         .SourceElementFormat = format,
         .SourceElementOffset = 0,
         .Component0Control = (bs >= 4) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component1Control = (bs >= 8) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = (bs >= 12) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = (bs >= 16) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
      });

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
      vfi.InstancingEnable = false;
      vfi.VertexElementIndex = 0;
   }
#endif

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_SGVS), sgvs);
#endif

   /* Disable all shader stages */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VS), vs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HS), hs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_TE), te);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DS), ds);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_GS), gs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_PS), ps);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SBE), sbe) {
      sbe.VertexURBEntryReadOffset = 1;
      sbe.NumberofSFOutputAttributes = 1;
      sbe.VertexURBEntryReadLength = 1;
#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      for (unsigned i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
#endif
   }

   /* Emit URB setup.  We tell it that the VS is active because we want it to
    * allocate space for the VS.  Even though no VS is run, we need VUEs to
    * store the data that the VF is going to pass to SOL.
    */
   const unsigned entry_size[4] = { DIV_ROUND_UP(32, 64), 1, 1, 1 };

   genX(emit_urb_setup)(cmd_buffer->device, &cmd_buffer->batch,
                        cmd_buffer->state.current_l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT, entry_size, NULL);

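   /* Destination side of the copy: stream the VUE contents out to dst through
    * SO buffer 0.
    */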
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
#if GEN_GEN < 12
      sob.SOBufferIndex = 0;
#else
      sob._3DCommandOpcode = 0;
      sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD;
#endif
      sob.MOCS = anv_mocs(cmd_buffer->device, dst.bo, 0);
      sob.SurfaceBaseAddress = dst;

#if GEN_GEN >= 8
      sob.SOBufferEnable = true;
      sob.SurfaceSize = size / 4 - 1;
#else
      sob.SurfacePitch = bs;
      sob.SurfaceEndAddress = anv_address_add(dst, size);
#endif

#if GEN_GEN >= 8
      /* As SOL writes out data, it updates the SO_WRITE_OFFSET registers with
       * the end position of the stream.  We need to reset this value to 0 at
       * the beginning of the run or else SOL will start at the offset from
       * the previous draw.
       */
      sob.StreamOffsetWriteEnable = true;
      sob.StreamOffset = 0;
#endif
   }

#if GEN_GEN <= 7
   /* The hardware can do this for us on BDW+ (see above) */
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), load) {
      load.RegisterOffset = GENX(SO_WRITE_OFFSET0_num);
      load.DataDWord = 0;
   }
#endif

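   /* One SO_DECL entry: write the single VUE slot to buffer 0, with a
    * component mask that matches the block size.
    */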
   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_SO_DECL_LIST),
                        .StreamtoBufferSelects0 = (1 << 0),
                        .NumEntries0 = 1);
   GENX(SO_DECL_ENTRY_pack)(&cmd_buffer->batch, dw + 3,
      &(struct GENX(SO_DECL_ENTRY)) {
         .Stream0Decl = {
            .OutputBufferSlot = 0,
            .RegisterIndex = 0,
            .ComponentMask = (1 << (bs / 4)) - 1,
         },
      });

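   /* Enable SOL itself; rendering is disabled, so nothing downstream of
    * streamout runs.
    */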
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STREAMOUT), so) {
      so.SOFunctionEnable = true;
      so.RenderingDisable = true;
      so.Stream0VertexReadOffset = 0;
      so.Stream0VertexReadLength = DIV_ROUND_UP(32, 64);
#if GEN_GEN >= 8
      so.Buffer0SurfacePitch = bs;
#else
      so.SOBufferEnable0 = true;
#endif
   }

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
      topo.PrimitiveTopologyType = _3DPRIM_POINTLIST;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = false;
   }

#if GEN_GEN >= 12
   /* Disable Primitive Replication. */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_PRIMITIVE_REPLICATION), pr);
#endif

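   /* Each "point" copies one bs-byte block, so size / bs vertices cover the
    * whole range.
    */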
   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = _3DPRIM_POINTLIST;
      prim.VertexCountPerInstance   = size / bs;
      prim.StartVertexLocation      = 0;
      prim.InstanceCount            = 1;
      prim.StartInstanceLocation    = 0;
      prim.BaseVertexLocation       = 0;
   }

   genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)(cmd_buffer, SEQUENTIAL,
                                                       1ull << 32);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
}