/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/gen_l3_config.h"

/**
 * This file implements some lightweight memcpy/memset operations on the GPU
 * using a vertex buffer and streamout.
 */

/**
 * Returns the greatest common divisor of a and b that is a power of two.
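 *
 * E.g., gcd_pow2_u64(12, 8) == 4 and gcd_pow2_u64(0, 24) == 8.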
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case, the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1ull << MIN2(a_log2, b_log2);
}

void
genX(cmd_buffer_mi_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_bo *dst, uint32_t dst_offset,
                           struct anv_bo *src, uint32_t src_offset,
                           uint32_t size)
{
   /* This memcpy operates in units of dwords. */
   assert(size % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);

   for (uint32_t i = 0; i < size; i += 4) {
      const struct anv_address src_addr =
         (struct anv_address) { src, src_offset + i};
      const struct anv_address dst_addr =
         (struct anv_address) { dst, dst_offset + i};
#if GEN_GEN >= 8
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = dst_addr;
         cp.SourceMemoryAddress = src_addr;
      }
#else
      /* IVB does not have a general purpose register for command streamer
       * commands. Therefore, we use an alternate temporary register.
       */
#define TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
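      /* Bounce each dword through TEMP_REG: MI_LOAD_REGISTER_MEM reads it
       * from the source address and MI_STORE_REGISTER_MEM writes it back
       * out to the destination address.
       */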
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), load) {
         load.RegisterAddress = TEMP_REG;
         load.MemoryAddress = src_addr;
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), store) {
         store.RegisterAddress = TEMP_REG;
         store.MemoryAddress = dst_addr;
      }
#undef TEMP_REG
#endif
   }
   return;
}

void
genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_bo *dst, uint32_t dst_offset,
                           struct anv_bo *src, uint32_t src_offset,
                           uint32_t size)
{
   if (size == 0)
      return;

   assert(dst_offset + size <= dst->size);
   assert(src_offset + size <= src->size);

   /* The maximum copy block size is 4 32-bit components at a time. */
   unsigned bs = 16;
   bs = gcd_pow2_u64(bs, src_offset);
   bs = gcd_pow2_u64(bs, dst_offset);
   bs = gcd_pow2_u64(bs, size);
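   /* bs is now the largest power of two (at most 16 bytes) that divides
    * src_offset, dst_offset, and size, so the copy can be expressed as a
    * whole number of bs-byte vertex elements.
    */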

   enum isl_format format;
   switch (bs) {
   case 4:  format = ISL_FORMAT_R32_UINT;          break;
   case 8:  format = ISL_FORMAT_R32G32_UINT;       break;
   case 16: format = ISL_FORMAT_R32G32B32A32_UINT; break;
   default:
      unreachable("Invalid size");
   }

   if (!cmd_buffer->state.current_l3_config) {
      const struct gen_l3_config *cfg =
         gen_get_default_l3_config(&cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   uint32_t *dw;
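   /* Bind the whole source range as vertex buffer 32 with a pitch of bs
    * bytes, so consecutive vertex indices fetch consecutive bs-byte blocks.
    */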
   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_VERTEX_BUFFERS));
   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferStartingAddress = { src, src_offset },
         .BufferPitch = bs,
#if (GEN_GEN >= 8)
         .MemoryObjectControlState = GENX(MOCS),
         .BufferSize = size,
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .EndAddress = { src, src_offset + size - 1 },
#endif
      });

   dw = anv_batch_emitn(&cmd_buffer->batch, 3, GENX(3DSTATE_VERTEX_ELEMENTS));
   GENX(VERTEX_ELEMENT_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex = 32,
         .Valid = true,
         .SourceElementFormat = (enum GENX(SURFACE_FORMAT)) format,
         .SourceElementOffset = 0,
         .Component0Control = (bs >= 4) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component1Control = (bs >= 8) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = (bs >= 12) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = (bs >= 16) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
      });

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_SGVS), sgvs);
#endif

   /* Disable all shader stages */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VS), vs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HS), hs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_TE), te);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DS), ds);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_GS), gs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_PS), ps);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SBE), sbe) {
      sbe.VertexURBEntryReadOffset = 1;
      sbe.NumberofSFOutputAttributes = 1;
      sbe.VertexURBEntryReadLength = 1;
#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      for (unsigned i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
#endif
   }

   /* Emit URB setup.  We tell it that the VS is active because we want it to
    * allocate space for the VS.  Even though one isn't run, we need VUEs to
    * store the data that VF is going to pass to SOL.
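    * The entry size of DIV_ROUND_UP(32, 64) == 1 requests a single
    * minimum-size URB entry per vertex.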
    */
   const unsigned entry_size[4] = { DIV_ROUND_UP(32, 64), 1, 1, 1 };

   genX(emit_urb_setup)(cmd_buffer->device, &cmd_buffer->batch,
                        cmd_buffer->state.current_l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT, entry_size);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
      sob.SOBufferIndex = 0;
      sob.SOBufferObjectControlState = GENX(MOCS);
      sob.SurfaceBaseAddress = (struct anv_address) { dst, dst_offset };

#if GEN_GEN >= 8
      sob.SOBufferEnable = true;
      sob.SurfaceSize = size - 1;
#else
      sob.SurfacePitch = bs;
      sob.SurfaceEndAddress = sob.SurfaceBaseAddress;
      sob.SurfaceEndAddress.offset += size;
#endif

#if GEN_GEN >= 8
      /* As SOL writes out data, it updates the SO_WRITE_OFFSET registers with
       * the end position of the stream.  We need to reset this value to 0 at
       * the beginning of the run or else SOL will start at the offset from
       * the previous draw.
       */
      sob.StreamOffsetWriteEnable = true;
      sob.StreamOffset = 0;
#endif
   }

#if GEN_GEN <= 7
   /* The hardware can do this for us on BDW+ (see above) */
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), load) {
      load.RegisterOffset = GENX(SO_WRITE_OFFSET0_num);
      load.DataDWord = 0;
   }
#endif

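   /* A single SO declaration: stream the low bs / 4 dwords of each vertex
    * to SO buffer 0, which is bound to the destination above.
    */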
   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_SO_DECL_LIST),
                        .StreamtoBufferSelects0 = (1 << 0),
                        .NumEntries0 = 1);
   GENX(SO_DECL_ENTRY_pack)(&cmd_buffer->batch, dw + 3,
      &(struct GENX(SO_DECL_ENTRY)) {
         .Stream0Decl = {
            .OutputBufferSlot = 0,
            .RegisterIndex = 0,
            .ComponentMask = (1 << (bs / 4)) - 1,
         },
      });

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STREAMOUT), so) {
      so.SOFunctionEnable = true;
      so.RenderingDisable = true;
      so.Stream0VertexReadOffset = 0;
      so.Stream0VertexReadLength = DIV_ROUND_UP(32, 64);
#if GEN_GEN >= 8
      so.Buffer0SurfacePitch = bs;
#else
      so.SOBufferEnable0 = true;
#endif
   }

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
      topo.PrimitiveTopologyType = _3DPRIM_POINTLIST;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = false;
   }

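   /* One point per bs-byte block: each vertex fetches a block from the
    * source vertex buffer and streamout writes it to the destination, so
    * size / bs points copy the whole range.
    */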
   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = _3DPRIM_POINTLIST;
      prim.VertexCountPerInstance   = size / bs;
      prim.StartVertexLocation      = 0;
      prim.InstanceCount            = 1;
      prim.StartInstanceLocation    = 0;
      prim.BaseVertexLocation       = 0;
   }

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
}