/*
 * Copyright 2007 VMware, Inc.
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 *
 * Common helper functions for PBO up- and downloads.
 */

#include "state_tracker/st_context.h"
#include "state_tracker/st_nir.h"
#include "state_tracker/st_pbo.h"

#include "main/context.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "cso_cache/cso_context.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"

#include "compiler/nir/nir_builder.h"

/* Final setup of buffer addressing information.
 *
 * buf_offset is in pixels.
 *
 * Returns false if something (e.g. alignment) prevents PBO upload/download.
 */
bool
st_pbo_addresses_setup(struct st_context *st,
                       struct pipe_resource *buf, intptr_t buf_offset,
                       struct st_pbo_addresses *addr)
{
   unsigned skip_pixels;

   /* Check alignment against texture buffer requirements. */
   {
      unsigned ofs = (buf_offset * addr->bytes_per_pixel) % st->ctx->Const.TextureBufferOffsetAlignment;
      if (ofs != 0) {
         if (ofs % addr->bytes_per_pixel != 0)
            return false;

         skip_pixels = ofs / addr->bytes_per_pixel;
         buf_offset -= skip_pixels;
      } else {
         skip_pixels = 0;
      }
   }

   assert(buf_offset >= 0);

   addr->buffer = buf;
   addr->first_element = buf_offset;
   addr->last_element = buf_offset + skip_pixels + addr->width - 1
         + (addr->height - 1 + (addr->depth - 1) * addr->image_height) * addr->pixels_per_row;

   if (addr->last_element - addr->first_element > st->ctx->Const.MaxTextureBufferSize - 1)
      return false;

   /* This should be ensured by Mesa before calling our callbacks */
   assert((addr->last_element + 1) * addr->bytes_per_pixel <= buf->width0);

   addr->constants.xoffset = -addr->xoffset + skip_pixels;
   addr->constants.yoffset = -addr->yoffset;
   addr->constants.stride = addr->pixels_per_row;
   addr->constants.image_size = addr->pixels_per_row * addr->image_height;
   addr->constants.layer_offset = 0;

   return true;
}
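
/* Illustrative usage sketch (a hedged example, not code that is called
 * anywhere in Mesa): a hypothetical caller fills in the geometric fields of
 * struct st_pbo_addresses first and lets st_pbo_addresses_setup() derive the
 * texel range and the shader constants.  The concrete values are made up.
 *
 *    struct st_pbo_addresses addr = {0};
 *    addr.bytes_per_pixel = 4;      // e.g. a 32-bit texel format
 *    addr.xoffset = addr.yoffset = 0;
 *    addr.width = 64;
 *    addr.height = 64;
 *    addr.depth = 1;
 *    addr.pixels_per_row = 64;      // tightly packed rows
 *    addr.image_height = 64;
 *
 *    if (!st_pbo_addresses_setup(st, buf, buf_offset_in_pixels, &addr))
 *       return false;               // fall back to a non-PBO transfer path
 *
 * On success, addr.first_element/addr.last_element give the texel range of
 * buf that must be visible through the buffer texture, and addr.constants
 * holds the addressing parameters consumed by the fragment shaders built in
 * create_fs() below.
 */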

/* Validate and fill buffer addressing information based on GL pixelstore
 * attributes.
 *
 * Returns false if some aspect of the addressing (e.g. alignment) prevents
 * PBO upload/download.
 */
bool
st_pbo_addresses_pixelstore(struct st_context *st,
                            GLenum gl_target, bool skip_images,
                            const struct gl_pixelstore_attrib *store,
                            const void *pixels,
                            struct st_pbo_addresses *addr)
{
   struct pipe_resource *buf = store->BufferObj->buffer;
   intptr_t buf_offset = (intptr_t) pixels;

   if (buf_offset % addr->bytes_per_pixel)
      return false;

   if (store->RowLength && store->RowLength < addr->width)
      return false;

   /* Convert to texels */
   buf_offset = buf_offset / addr->bytes_per_pixel;

   /* Determine image height */
   if (gl_target == GL_TEXTURE_1D_ARRAY) {
      addr->image_height = 1;
   } else {
      addr->image_height = store->ImageHeight > 0 ? store->ImageHeight : addr->height;
   }

   /* Compute the stride, taking store->Alignment into account */
   {
      unsigned pixels_per_row = store->RowLength > 0 ?
                                store->RowLength : addr->width;
      unsigned bytes_per_row = pixels_per_row * addr->bytes_per_pixel;
      unsigned remainder = bytes_per_row % store->Alignment;
      unsigned offset_rows;

      if (remainder > 0)
         bytes_per_row += store->Alignment - remainder;

      if (bytes_per_row % addr->bytes_per_pixel)
         return false;

      addr->pixels_per_row = bytes_per_row / addr->bytes_per_pixel;

      offset_rows = store->SkipRows;
      if (skip_images)
         offset_rows += addr->image_height * store->SkipImages;

      buf_offset += store->SkipPixels + addr->pixels_per_row * offset_rows;
   }

   if (!st_pbo_addresses_setup(st, buf, buf_offset, addr))
      return false;

   /* Support GL_PACK_INVERT_MESA */
   if (store->Invert) {
      addr->constants.xoffset += (addr->height - 1) * addr->constants.stride;
      addr->constants.stride = -addr->constants.stride;
   }

   return true;
}
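
/* Worked example for the stride computation above (illustrative numbers, not
 * taken from any particular caller): with bytes_per_pixel = 3,
 * addr->width = 10, store->RowLength = 0 and store->Alignment = 4,
 *
 *    bytes_per_row = 10 * 3       = 30
 *    remainder     = 30 % 4       = 2
 *    bytes_per_row = 30 + (4 - 2) = 32
 *
 * and 32 % 3 != 0, so the padded row is not a whole number of texels and the
 * function returns false (no PBO fast path for this transfer).  With
 * bytes_per_pixel = 4 the padding always divides evenly and
 * addr->pixels_per_row = bytes_per_row / 4.
 */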

/* For download from a framebuffer, we may have to invert the Y axis. The
 * setup is as follows:
 * - the viewport is set up inverted, so that the position sysval is correct
 *   for texel fetches;
 * - this function adjusts the fragment shader's constant buffer so that it
 *   computes the correct destination addresses.
 */
void
st_pbo_addresses_invert_y(struct st_pbo_addresses *addr,
                          unsigned viewport_height)
{
   addr->constants.xoffset +=
      (viewport_height - 1 + 2 * addr->constants.yoffset) * addr->constants.stride;
   addr->constants.stride = -addr->constants.stride;
}
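
/* Derivation (for reference): the fragment shaders built in create_fs()
 * below compute
 *
 *    addr(x, y) = xoffset + x + (yoffset + y) * stride
 *
 * with xoffset/yoffset/stride taken from addr->constants.  After the
 * adjustment above this becomes
 *
 *    addr'(x, y) = xoffset + x + (yoffset + (viewport_height - 1 - y)) * stride
 *
 * i.e. window row y now addresses the destination row that row
 * viewport_height - 1 - y addressed before, which is exactly the Y flip
 * needed when reading back from a framebuffer.
 */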

/* Set up all vertex pipeline state, rasterizer state, and fragment shader
 * constants, and issue the draw call for PBO upload/download.
 *
 * The caller is responsible for saving and restoring state, as well as for
 * setting other fragment shader state (fragment shader, samplers), and
 * framebuffer/viewport/DSA/blend state.
 */
bool
st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
            unsigned surface_width, unsigned surface_height)
{
   struct cso_context *cso = st->cso_context;
   struct pipe_context *pipe = st->pipe;

   /* Set up vertex and geometry shaders */
   if (!st->pbo.vs) {
      st->pbo.vs = st_pbo_create_vs(st);
      if (!st->pbo.vs)
         return false;
   }

   if (addr->depth != 1 && st->pbo.use_gs && !st->pbo.gs) {
      st->pbo.gs = st_pbo_create_gs(st);
      if (!st->pbo.gs)
         return false;
   }

   cso_set_vertex_shader_handle(cso, st->pbo.vs);

   cso_set_geometry_shader_handle(cso, addr->depth != 1 ? st->pbo.gs : NULL);

   cso_set_tessctrl_shader_handle(cso, NULL);

   cso_set_tesseval_shader_handle(cso, NULL);

   /* Upload vertices */
   {
      struct pipe_vertex_buffer vbo = {0};
      struct cso_velems_state velem;

      float x0 = (float) addr->xoffset / surface_width * 2.0f - 1.0f;
      float y0 = (float) addr->yoffset / surface_height * 2.0f - 1.0f;
      float x1 = (float) (addr->xoffset + addr->width) / surface_width * 2.0f - 1.0f;
      float y1 = (float) (addr->yoffset + addr->height) / surface_height * 2.0f - 1.0f;

      float *verts = NULL;

      u_upload_alloc(st->pipe->stream_uploader, 0, 8 * sizeof(float), 4,
                     &vbo.buffer_offset, &vbo.buffer.resource, (void **) &verts);
      if (!verts)
         return false;

      verts[0] = x0;
      verts[1] = y0;
      verts[2] = x0;
      verts[3] = y1;
      verts[4] = x1;
      verts[5] = y0;
      verts[6] = x1;
      verts[7] = y1;

      u_upload_unmap(st->pipe->stream_uploader);

      velem.count = 1;
      velem.velems[0].src_offset = 0;
      velem.velems[0].src_stride = 2 * sizeof(float);
      velem.velems[0].instance_divisor = 0;
      velem.velems[0].vertex_buffer_index = 0;
      velem.velems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
      velem.velems[0].dual_slot = false;

      cso_set_vertex_elements(cso, &velem);
      cso_set_vertex_buffers(cso, 1, true, &vbo);
   }

   /* Upload constants */
   {
      struct pipe_constant_buffer cb;

      cb.buffer = NULL;
      cb.user_buffer = &addr->constants;
      cb.buffer_offset = 0;
      cb.buffer_size = sizeof(addr->constants);

      pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, false, &cb);

      pipe_resource_reference(&cb.buffer, NULL);
   }

   /* Rasterizer state */
   cso_set_rasterizer(cso, &st->pbo.raster);

   /* Disable stream output */
   cso_set_stream_outputs(cso, 0, NULL, 0, 0);

   if (addr->depth == 1) {
      cso_draw_arrays(cso, MESA_PRIM_TRIANGLE_STRIP, 0, 4);
   } else {
      cso_draw_arrays_instanced(cso, MESA_PRIM_TRIANGLE_STRIP,
                                0, 4, 0, addr->depth);
   }

   return true;
}
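
/* Hedged sketch of a caller (illustrative only; the cso_save_state()/
 * cso_restore_state() helpers and the CSO_BIT_* mask shown here are
 * assumptions about the CSO utility API, not something defined in this file):
 *
 *    cso_save_state(cso, CSO_BITS_ALL_SHADERS | CSO_BIT_VERTEX_ELEMENTS |
 *                        CSO_BIT_FRAMEBUFFER | CSO_BIT_VIEWPORT |
 *                        CSO_BIT_BLEND | CSO_BIT_DEPTH_STENCIL_ALPHA |
 *                        CSO_BIT_RASTERIZER);
 *    cso_set_fragment_shader_handle(cso, fs);   // from st_pbo_get_*_fs()
 *    // ... bind the buffer texture or shader image, framebuffer, viewport,
 *    // blend and DSA state ...
 *    bool ok = st_pbo_draw(st, &addr, surface_width, surface_height);
 *    cso_restore_state(cso, 0);
 *
 * st_pbo_draw() itself only touches the vertex pipeline, the rasterizer
 * state, stream output, and the fragment constant buffer at slot 0.
 */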

void *
st_pbo_create_vs(struct st_context *st)
{
   const nir_shader_compiler_options *options =
      st_get_nir_compiler_options(st, MESA_SHADER_VERTEX);

   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_VERTEX, options,
                                                  "st/pbo VS");
   b.shader->info.io_lowered = true;

   if (!st->pbo.use_gs) {
      nir_def *pos = nir_load_input(&b, 4, 32, nir_imm_int(&b, 0),
                                    .io_semantics.location = VERT_ATTRIB_POS);
      nir_store_output(&b, pos, nir_imm_int(&b, 0),
                       .io_semantics.location = VARYING_SLOT_POS);
   }

   if (st->pbo.layers) {
      nir_variable *instance_id_var =
         nir_create_variable_with_location(b.shader, nir_var_system_value,
                                           SYSTEM_VALUE_INSTANCE_ID, glsl_int_type());
      nir_def *instance_id = nir_load_var(&b, instance_id_var);

      if (st->pbo.use_gs) {
         nir_def *pos = nir_load_input(&b, 4, 32, nir_imm_int(&b, 0),
                                       .io_semantics.location = VERT_ATTRIB_POS);
         nir_store_output(&b, nir_vector_insert_imm(&b, pos,
                                                    nir_i2f32(&b, instance_id), 2),
                          nir_imm_int(&b, 0),
                          .io_semantics.location = VARYING_SLOT_POS);
      } else {
         nir_store_output(&b, instance_id, nir_imm_int(&b, 0),
                          .src_type = nir_type_int32,
                          .io_semantics.location = VARYING_SLOT_LAYER);
      }
   }

   return st_nir_finish_builtin_shader(st, b.shader);
}

void *
st_pbo_create_gs(struct st_context *st)
{
   const nir_shader_compiler_options *options =
      st_get_nir_compiler_options(st, MESA_SHADER_GEOMETRY);

   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_GEOMETRY, options,
                                                  "st/pbo GS");

   b.shader->info.gs.input_primitive = MESA_PRIM_TRIANGLES;
   b.shader->info.gs.output_primitive = MESA_PRIM_TRIANGLE_STRIP;
   b.shader->info.gs.vertices_in = 3;
   b.shader->info.gs.vertices_out = 3;
   b.shader->info.gs.invocations = 1;
   b.shader->info.gs.active_stream_mask = 1;
   b.shader->info.io_lowered = true;

   for (int i = 0; i < 3; ++i) {
      nir_def *pos =
         nir_load_per_vertex_input(&b, 4, 32, nir_imm_int(&b, i),
                                   nir_imm_int(&b, 0),
                                   .io_semantics.location = VARYING_SLOT_POS);

      nir_store_output(&b, nir_vector_insert_imm(&b, pos, nir_imm_float(&b, 0.0), 2),
                       nir_imm_int(&b, 0),
                       .io_semantics.location = VARYING_SLOT_POS);

      /* out_layer.x = f2i(in_pos[i].z) */
      nir_store_output(&b, nir_f2i32(&b, nir_channel(&b, pos, 2)),
                       nir_imm_int(&b, 0),
                       .src_type = nir_type_int32,
                       .io_semantics.location = VARYING_SLOT_LAYER);
      nir_emit_vertex(&b);
   }

   return st_nir_finish_builtin_shader(st, b.shader);
}

const struct glsl_type *
st_pbo_sampler_type_for_target(enum pipe_texture_target target,
                        enum st_pbo_conversion conv)
{
   bool is_array = target >= PIPE_TEXTURE_1D_ARRAY;
   static const enum glsl_sampler_dim dim[] = {
      [PIPE_BUFFER]             = GLSL_SAMPLER_DIM_BUF,
      [PIPE_TEXTURE_1D]         = GLSL_SAMPLER_DIM_1D,
      [PIPE_TEXTURE_2D]         = GLSL_SAMPLER_DIM_2D,
      [PIPE_TEXTURE_3D]         = GLSL_SAMPLER_DIM_3D,
      [PIPE_TEXTURE_CUBE]       = GLSL_SAMPLER_DIM_CUBE,
      [PIPE_TEXTURE_RECT]       = GLSL_SAMPLER_DIM_RECT,
      [PIPE_TEXTURE_1D_ARRAY]   = GLSL_SAMPLER_DIM_1D,
      [PIPE_TEXTURE_2D_ARRAY]   = GLSL_SAMPLER_DIM_2D,
      [PIPE_TEXTURE_CUBE_ARRAY] = GLSL_SAMPLER_DIM_CUBE,
   };

   static const enum glsl_base_type type[] = {
      [ST_PBO_CONVERT_FLOAT] = GLSL_TYPE_FLOAT,
      [ST_PBO_CONVERT_UINT] = GLSL_TYPE_UINT,
      [ST_PBO_CONVERT_UINT_TO_SINT] = GLSL_TYPE_UINT,
      [ST_PBO_CONVERT_SINT] = GLSL_TYPE_INT,
      [ST_PBO_CONVERT_SINT_TO_UINT] = GLSL_TYPE_INT,
   };

   return glsl_sampler_type(dim[target], false, is_array, type[conv]);
}


static void *
create_fs(struct st_context *st, bool download,
          enum pipe_texture_target target,
          enum st_pbo_conversion conversion,
          enum pipe_format format,
          bool need_layer)
{
   const nir_shader_compiler_options *options =
      st_get_nir_compiler_options(st, MESA_SHADER_FRAGMENT);

   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, options,
                                                  download ?
                                                  "st/pbo download FS" :
                                                  "st/pbo upload FS");
   b.shader->info.io_lowered = true;

   nir_def *zero = nir_imm_int(&b, 0);

   /* param = [ -xoffset + skip_pixels, -yoffset, stride, image_height ] */
   nir_variable *param_var =
      nir_variable_create(b.shader, nir_var_uniform, glsl_vec4_type(), "param");
   b.shader->num_uniforms += 4;
   nir_def *param = nir_load_var(&b, param_var);

   nir_def *coord;
   if (st->ctx->Const.GLSLFragCoordIsSysVal) {
      nir_variable *fragcoord =
         nir_create_variable_with_location(b.shader, nir_var_system_value,
                                           SYSTEM_VALUE_FRAG_COORD, glsl_vec4_type());
      coord = nir_load_var(&b, fragcoord);
   } else {
      nir_def *baryc = nir_load_barycentric_pixel(&b, 32, .interp_mode = INTERP_MODE_SMOOTH);
      coord = nir_load_interpolated_input(&b, 4, 32, baryc,
                                          nir_imm_int(&b, 0),
                                          .io_semantics.location = VARYING_SLOT_POS);
   }

   /* When st->pbo.layers == false, we are guaranteed to have only a single
    * layer, but we still need the "layer" variable to supply the "array"
    * coordinate for the texture.  Hence, when an array texture is used but
    * only a single layer is required, layer is set to zero.
    */
   nir_def *layer = NULL;
   if (!download || target == PIPE_TEXTURE_1D_ARRAY ||
                    target == PIPE_TEXTURE_2D_ARRAY ||
                    target == PIPE_TEXTURE_3D ||
                    target == PIPE_TEXTURE_CUBE ||
                    target == PIPE_TEXTURE_CUBE_ARRAY) {
      if (need_layer) {
         assert(st->pbo.layers);
         layer = nir_load_input(&b, 1, 32, nir_imm_int(&b, 0),
                                .dest_type = nir_type_int32,
                                .io_semantics.location = VARYING_SLOT_LAYER);
      } else {
         layer = zero;
      }
   }

   /* offset_pos = param.xy + f2i(coord.xy) */
   nir_def *offset_pos =
      nir_iadd(&b, nir_channels(&b, param, TGSI_WRITEMASK_XY),
               nir_f2i32(&b, nir_channels(&b, coord, TGSI_WRITEMASK_XY)));

   /* addr = offset_pos.x + offset_pos.y * stride */
   nir_def *pbo_addr =
      nir_iadd(&b, nir_channel(&b, offset_pos, 0),
               nir_imul(&b, nir_channel(&b, offset_pos, 1),
                        nir_channel(&b, param, 2)));
   if (layer && layer != zero) {
      /* pbo_addr += image_height * layer */
      pbo_addr = nir_iadd(&b, pbo_addr,
                          nir_imul(&b, layer, nir_channel(&b, param, 3)));
   }

   nir_def *texcoord;
   if (download) {
      texcoord = nir_f2i32(&b, nir_channels(&b, coord, TGSI_WRITEMASK_XY));

      if (target == PIPE_TEXTURE_1D) {
         unsigned sw = 0;
         texcoord = nir_swizzle(&b, texcoord, &sw, 1);
      }

      if (layer) {
         nir_def *src_layer = layer;

         if (target == PIPE_TEXTURE_3D) {
            nir_variable *layer_offset_var =
               nir_variable_create(b.shader, nir_var_uniform,
                                   glsl_int_type(), "layer_offset");
            b.shader->num_uniforms += 1;
            layer_offset_var->data.driver_location = 4;
            nir_def *layer_offset = nir_load_var(&b, layer_offset_var);

            src_layer = nir_iadd(&b, layer, layer_offset);
         }

         if (target == PIPE_TEXTURE_1D_ARRAY) {
            texcoord = nir_vec2(&b, nir_channel(&b, texcoord, 0),
                                    src_layer);
         } else {
            texcoord = nir_vec3(&b, nir_channel(&b, texcoord, 0),
                                    nir_channel(&b, texcoord, 1),
                                    src_layer);
         }
      }
   } else {
      texcoord = pbo_addr;
   }

   nir_variable *tex_var =
      nir_variable_create(b.shader, nir_var_uniform,
                          st_pbo_sampler_type_for_target(target, conversion),
                          "tex");
   tex_var->data.explicit_binding = true;
   tex_var->data.binding = 0;

   nir_deref_instr *tex_deref = nir_build_deref_var(&b, tex_var);

   nir_tex_instr *tex = nir_tex_instr_create(b.shader, 3);
   tex->op = nir_texop_txf;
   tex->sampler_dim = glsl_get_sampler_dim(tex_var->type);
   tex->coord_components =
      glsl_get_sampler_coordinate_components(tex_var->type);
   tex->is_array = target >= PIPE_TEXTURE_1D_ARRAY;

   tex->dest_type = nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(tex_var->type));
   tex->src[0].src_type = nir_tex_src_texture_deref;
   tex->src[0].src = nir_src_for_ssa(&tex_deref->def);
   tex->src[1].src_type = nir_tex_src_sampler_deref;
   tex->src[1].src = nir_src_for_ssa(&tex_deref->def);
   tex->src[2].src_type = nir_tex_src_coord;
   tex->src[2].src = nir_src_for_ssa(texcoord);
   nir_def_init(&tex->instr, &tex->def, 4, 32);
   nir_builder_instr_insert(&b, &tex->instr);
   nir_def *result = &tex->def;

   if (conversion == ST_PBO_CONVERT_SINT_TO_UINT)
      result = nir_imax(&b, result, zero);
   else if (conversion == ST_PBO_CONVERT_UINT_TO_SINT)
      result = nir_umin(&b, result, nir_imm_int(&b, (1u << 31) - 1));

   if (download) {
      static const enum glsl_base_type type[] = {
         [ST_PBO_CONVERT_FLOAT] = GLSL_TYPE_FLOAT,
         [ST_PBO_CONVERT_UINT] = GLSL_TYPE_UINT,
         [ST_PBO_CONVERT_UINT_TO_SINT] = GLSL_TYPE_INT,
         [ST_PBO_CONVERT_SINT] = GLSL_TYPE_INT,
         [ST_PBO_CONVERT_SINT_TO_UINT] = GLSL_TYPE_UINT,
      };
      static const nir_alu_type nir_types[] = {
         [ST_PBO_CONVERT_FLOAT] = nir_type_float,
         [ST_PBO_CONVERT_UINT] = nir_type_uint,
         [ST_PBO_CONVERT_UINT_TO_SINT] = nir_type_int,
         [ST_PBO_CONVERT_SINT] = nir_type_int,
         [ST_PBO_CONVERT_SINT_TO_UINT] = nir_type_uint,
      };
      nir_variable *img_var =
         nir_variable_create(b.shader, nir_var_image,
                             glsl_image_type(GLSL_SAMPLER_DIM_BUF, false,
                                             type[conversion]), "img");
      img_var->data.access = ACCESS_NON_READABLE;
      img_var->data.explicit_binding = true;
      img_var->data.binding = 0;
      img_var->data.image.format = format;
      nir_deref_instr *img_deref = nir_build_deref_var(&b, img_var);

      nir_image_deref_store(&b, &img_deref->def,
                            nir_vec4(&b, pbo_addr, zero, zero, zero),
                            zero,
                            result,
                            nir_imm_int(&b, 0),
                            .src_type = nir_types[conversion],
                            .image_dim = GLSL_SAMPLER_DIM_BUF);
   } else {
      nir_store_output(&b, result, nir_imm_int(&b, 0),
                       .io_semantics.location = FRAG_RESULT_COLOR);
   }

   return st_nir_finish_builtin_shader(st, b.shader);
}

static enum st_pbo_conversion
get_pbo_conversion(enum pipe_format src_format, enum pipe_format dst_format)
{
   if (util_format_is_pure_uint(src_format)) {
      if (util_format_is_pure_uint(dst_format))
         return ST_PBO_CONVERT_UINT;
      if (util_format_is_pure_sint(dst_format))
         return ST_PBO_CONVERT_UINT_TO_SINT;
   } else if (util_format_is_pure_sint(src_format)) {
      if (util_format_is_pure_sint(dst_format))
         return ST_PBO_CONVERT_SINT;
      if (util_format_is_pure_uint(dst_format))
         return ST_PBO_CONVERT_SINT_TO_UINT;
   }

   return ST_PBO_CONVERT_FLOAT;
}

void *
st_pbo_get_upload_fs(struct st_context *st,
                     enum pipe_format src_format,
                     enum pipe_format dst_format,
                     bool need_layer)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.upload_fs) == ST_NUM_PBO_CONVERSIONS);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.upload_fs[conversion][need_layer])
      st->pbo.upload_fs[conversion][need_layer] = create_fs(st, false, 0, conversion, PIPE_FORMAT_NONE, need_layer);

   return st->pbo.upload_fs[conversion][need_layer];
}

void *
st_pbo_get_download_fs(struct st_context *st, enum pipe_texture_target target,
                       enum pipe_format src_format,
                       enum pipe_format dst_format,
                       bool need_layer)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.download_fs) == ST_NUM_PBO_CONVERSIONS);
   assert(target < PIPE_MAX_TEXTURE_TYPES);

   struct pipe_screen *screen = st->screen;
   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);
   bool formatless_store = screen->caps.image_store_formatted;

   /* For drivers that do not support formatless image stores, the download FS
    * is kept in a dynamically allocated array indexed by the store format.
    */
   if (!formatless_store && !st->pbo.download_fs[conversion][target][need_layer])
      st->pbo.download_fs[conversion][target][need_layer] = calloc(sizeof(void *), PIPE_FORMAT_COUNT);

   if (formatless_store) {
      if (!st->pbo.download_fs[conversion][target][need_layer])
         st->pbo.download_fs[conversion][target][need_layer] = create_fs(st, true, target, conversion, PIPE_FORMAT_NONE, need_layer);
      return st->pbo.download_fs[conversion][target][need_layer];
   } else {
      void **fs_array = (void **)st->pbo.download_fs[conversion][target][need_layer];
      if (!fs_array[dst_format])
         fs_array[dst_format] = create_fs(st, true, target, conversion, dst_format, need_layer);
      return fs_array[dst_format];
   }
}

void
st_init_pbo_helpers(struct st_context *st)
{
   struct pipe_screen *screen = st->screen;

   st->pbo.upload_enabled =
      screen->caps.texture_buffer_objects &&
      screen->caps.texture_buffer_offset_alignment >= 1 &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_INTEGERS);
   if (!st->pbo.upload_enabled)
      return;

   st->pbo.download_enabled =
      st->pbo.upload_enabled &&
      screen->caps.sampler_view_target &&
      screen->caps.framebuffer_no_attachment &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
                                       PIPE_SHADER_CAP_MAX_SHADER_IMAGES) >= 1;

   st->pbo.rgba_only =
      screen->caps.buffer_sampler_view_rgba_only;

   if (screen->caps.vs_instanceid) {
      if (screen->caps.vs_layer_viewport) {
         st->pbo.layers = true;
      } else if (screen->caps.max_geometry_output_vertices >= 3) {
         st->pbo.layers = true;
         st->pbo.use_gs = true;
      }
   }

   /* Blend state */
   memset(&st->pbo.upload_blend, 0, sizeof(struct pipe_blend_state));
   st->pbo.upload_blend.rt[0].colormask = PIPE_MASK_RGBA;

   /* Rasterizer state */
   memset(&st->pbo.raster, 0, sizeof(struct pipe_rasterizer_state));
   st->pbo.raster.half_pixel_center = 1;

   const char *pbo = debug_get_option("MESA_COMPUTE_PBO", NULL);
   if (pbo) {
      st->force_compute_based_texture_transfer = true;
      st->force_specialized_compute_transfer = !strncmp(pbo, "spec", 4);
   }

   if (st->allow_compute_based_texture_transfer || st->force_compute_based_texture_transfer)
      st->pbo.shaders = _mesa_hash_table_create_u32_keys(NULL);
}

void
st_destroy_pbo_helpers(struct st_context *st)
{
   struct pipe_screen *screen = st->screen;
   bool formatless_store = screen->caps.image_store_formatted;
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(st->pbo.upload_fs); ++i) {
      for (unsigned j = 0; j < ARRAY_SIZE(st->pbo.upload_fs[0]); j++) {
         if (st->pbo.upload_fs[i][j]) {
            st->pipe->delete_fs_state(st->pipe, st->pbo.upload_fs[i][j]);
            st->pbo.upload_fs[i][j] = NULL;
         }
      }
   }

   for (i = 0; i < ARRAY_SIZE(st->pbo.download_fs); ++i) {
      for (unsigned j = 0; j < ARRAY_SIZE(st->pbo.download_fs[0]); ++j) {
         for (unsigned k = 0; k < ARRAY_SIZE(st->pbo.download_fs[0][0]); k++) {
            if (st->pbo.download_fs[i][j][k]) {
               if (formatless_store) {
                  st->pipe->delete_fs_state(st->pipe, st->pbo.download_fs[i][j][k]);
               } else {
                  void **fs_array = (void **)st->pbo.download_fs[i][j][k];
                  for (unsigned l = 0; l < PIPE_FORMAT_COUNT; l++)
                     if (fs_array[l])
                        st->pipe->delete_fs_state(st->pipe, fs_array[l]);
                  free(st->pbo.download_fs[i][j][k]);
               }
               st->pbo.download_fs[i][j][k] = NULL;
            }
         }
      }
   }

   if (st->pbo.gs) {
      st->pipe->delete_gs_state(st->pipe, st->pbo.gs);
      st->pbo.gs = NULL;
   }

   if (st->pbo.vs) {
      st->pipe->delete_vs_state(st->pipe, st->pbo.vs);
      st->pbo.vs = NULL;
   }

   st_pbo_compute_deinit(st);
}