/*
 * Copyright 2007 VMware, Inc.
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 *
 * Common helper functions for PBO up- and downloads.
 */

#include "state_tracker/st_context.h"
#include "state_tracker/st_nir.h"
#include "state_tracker/st_pbo.h"

#include "main/context.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "cso_cache/cso_context.h"
#include "tgsi/tgsi_ureg.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"

#include "compiler/nir/nir_builder.h"

/* Final setup of buffer addressing information.
 *
 * buf_offset is in pixels.
 *
 * Returns false if something (e.g. alignment) prevents PBO upload/download.
 */
bool
st_pbo_addresses_setup(struct st_context *st,
                       struct pipe_resource *buf, intptr_t buf_offset,
                       struct st_pbo_addresses *addr)
{
   unsigned skip_pixels;

   /* Check alignment against texture buffer requirements. */
   {
      unsigned ofs = (buf_offset * addr->bytes_per_pixel) % st->ctx->Const.TextureBufferOffsetAlignment;
      if (ofs != 0) {
         if (ofs % addr->bytes_per_pixel != 0)
            return false;

         skip_pixels = ofs / addr->bytes_per_pixel;
         buf_offset -= skip_pixels;
      } else {
         skip_pixels = 0;
      }
   }
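
   /* Worked example (illustrative values only): with bytes_per_pixel = 4,
    * TextureBufferOffsetAlignment = 64 and buf_offset = 7 pixels, the byte
    * offset 28 is misaligned, so buf_offset is rewound to 0 and
    * skip_pixels = 7; the shader compensates through constants.xoffset
    * below.
    */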

   assert(buf_offset >= 0);

   addr->buffer = buf;
   addr->first_element = buf_offset;
   addr->last_element = buf_offset + skip_pixels + addr->width - 1
         + (addr->height - 1 + (addr->depth - 1) * addr->image_height) * addr->pixels_per_row;

   if (addr->last_element - addr->first_element > st->ctx->Const.MaxTextureBufferSize - 1)
      return false;

   /* This should be ensured by Mesa before calling our callbacks */
   assert((addr->last_element + 1) * addr->bytes_per_pixel <= buf->width0);

   addr->constants.xoffset = -addr->xoffset + skip_pixels;
   addr->constants.yoffset = -addr->yoffset;
   addr->constants.stride = addr->pixels_per_row;
   addr->constants.image_size = addr->pixels_per_row * addr->image_height;
   addr->constants.layer_offset = 0;

   return true;
}

/* Validate and fill buffer addressing information based on GL pixelstore
 * attributes.
 *
 * Returns false if some aspect of the addressing (e.g. alignment) prevents
 * PBO upload/download.
 */
bool
st_pbo_addresses_pixelstore(struct st_context *st,
                            GLenum gl_target, bool skip_images,
                            const struct gl_pixelstore_attrib *store,
                            const void *pixels,
                            struct st_pbo_addresses *addr)
{
   struct pipe_resource *buf = store->BufferObj->buffer;
   intptr_t buf_offset = (intptr_t) pixels;

   if (buf_offset % addr->bytes_per_pixel)
      return false;

   /* Convert to texels */
   buf_offset = buf_offset / addr->bytes_per_pixel;

   /* Determine image height */
   if (gl_target == GL_TEXTURE_1D_ARRAY) {
      addr->image_height = 1;
   } else {
      addr->image_height = store->ImageHeight > 0 ? store->ImageHeight : addr->height;
   }

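   /* Worked example (illustrative values only): with RowLength = 5,
    * bytes_per_pixel = 2 and Alignment = 4, the unpadded row is 10 bytes;
    * padding to the alignment gives 12 bytes, i.e. pixels_per_row = 6. If
    * the padded row were not a whole number of pixels, we return false
    * below.
    */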
   /* Compute the stride, taking store->Alignment into account */
   {
      unsigned pixels_per_row = store->RowLength > 0 ?
                                store->RowLength : addr->width;
      unsigned bytes_per_row = pixels_per_row * addr->bytes_per_pixel;
      unsigned remainder = bytes_per_row % store->Alignment;
      unsigned offset_rows;

      if (remainder > 0)
         bytes_per_row += store->Alignment - remainder;

      if (bytes_per_row % addr->bytes_per_pixel)
         return false;

      addr->pixels_per_row = bytes_per_row / addr->bytes_per_pixel;

      offset_rows = store->SkipRows;
      if (skip_images)
         offset_rows += addr->image_height * store->SkipImages;

      buf_offset += store->SkipPixels + addr->pixels_per_row * offset_rows;
   }

   if (!st_pbo_addresses_setup(st, buf, buf_offset, addr))
      return false;

   /* Support GL_PACK_INVERT_MESA */
   if (store->Invert) {
      addr->constants.xoffset += (addr->height - 1) * addr->constants.stride;
      addr->constants.stride = -addr->constants.stride;
   }

   return true;
}

/* For download from a framebuffer, we may have to invert the Y axis. The
 * setup is as follows:
 * - the viewport is set up inverted, so that the position sysval is correct
 *   for texel fetches
 * - this function adjusts the fragment shader's constant buffer so that the
 *   correct destination addresses are computed.
 */
void
st_pbo_addresses_invert_y(struct st_pbo_addresses *addr,
                          unsigned viewport_height)
{
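   /* With the address formula used by the PBO fragment shaders,
    *    address(x, y) = x + xoffset + (y + yoffset) * stride,
    * folding (viewport_height - 1 + 2 * yoffset) * stride into xoffset and
    * negating stride maps fragment row y to the address that row
    * (viewport_height - 1 - y) would otherwise get, i.e. it flips Y.
    */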
   addr->constants.xoffset +=
      (viewport_height - 1 + 2 * addr->constants.yoffset) * addr->constants.stride;
   addr->constants.stride = -addr->constants.stride;
}

/* Set up all vertex pipeline state, rasterizer state, and fragment shader
 * constants, and issue the draw call for PBO upload/download.
 *
 * The caller is responsible for saving and restoring state, as well as for
 * setting other fragment shader state (fragment shader, samplers) and the
 * framebuffer/viewport/DSA/blend state.
 */
bool
st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
            unsigned surface_width, unsigned surface_height)
{
   struct cso_context *cso = st->cso_context;
   struct pipe_context *pipe = st->pipe;

   /* Set up vertex and geometry shaders */
   if (!st->pbo.vs) {
      st->pbo.vs = st_pbo_create_vs(st);
      if (!st->pbo.vs)
         return false;
   }

   if (addr->depth != 1 && st->pbo.use_gs && !st->pbo.gs) {
      st->pbo.gs = st_pbo_create_gs(st);
      if (!st->pbo.gs)
         return false;
   }

   cso_set_vertex_shader_handle(cso, st->pbo.vs);

   cso_set_geometry_shader_handle(cso, addr->depth != 1 ? st->pbo.gs : NULL);

   cso_set_tessctrl_shader_handle(cso, NULL);

   cso_set_tesseval_shader_handle(cso, NULL);

   /* Upload vertices */
   {
      struct pipe_vertex_buffer vbo = {0};
      struct cso_velems_state velem;

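      /* Map the destination pixel rectangle to NDC, so the triangle strip
       * drawn below covers [xoffset, xoffset + width) x
       * [yoffset, yoffset + height) of the surface.
       */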
      float x0 = (float) addr->xoffset / surface_width * 2.0f - 1.0f;
      float y0 = (float) addr->yoffset / surface_height * 2.0f - 1.0f;
      float x1 = (float) (addr->xoffset + addr->width) / surface_width * 2.0f - 1.0f;
      float y1 = (float) (addr->yoffset + addr->height) / surface_height * 2.0f - 1.0f;

      float *verts = NULL;

      vbo.stride = 2 * sizeof(float);

      u_upload_alloc(st->pipe->stream_uploader, 0, 8 * sizeof(float), 4,
                     &vbo.buffer_offset, &vbo.buffer.resource, (void **) &verts);
      if (!verts)
         return false;

      verts[0] = x0;
      verts[1] = y0;
      verts[2] = x0;
      verts[3] = y1;
      verts[4] = x1;
      verts[5] = y0;
      verts[6] = x1;
      verts[7] = y1;

      u_upload_unmap(st->pipe->stream_uploader);

      velem.count = 1;
      velem.velems[0].src_offset = 0;
      velem.velems[0].instance_divisor = 0;
      velem.velems[0].vertex_buffer_index = 0;
      velem.velems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
      velem.velems[0].dual_slot = false;

      cso_set_vertex_elements(cso, &velem);

      cso_set_vertex_buffers(cso, 0, 1, 0, false, &vbo);
      st->last_num_vbuffers = MAX2(st->last_num_vbuffers, 1);

      pipe_resource_reference(&vbo.buffer.resource, NULL);
   }

   /* Upload constants */
   {
      struct pipe_constant_buffer cb;

      cb.buffer = NULL;
      cb.user_buffer = &addr->constants;
      cb.buffer_offset = 0;
      cb.buffer_size = sizeof(addr->constants);

      pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, false, &cb);

      pipe_resource_reference(&cb.buffer, NULL);
   }

   /* Rasterizer state */
   cso_set_rasterizer(cso, &st->pbo.raster);

   /* Disable stream output */
   cso_set_stream_outputs(cso, 0, NULL, 0);

   if (addr->depth == 1) {
      cso_draw_arrays(cso, PIPE_PRIM_TRIANGLE_STRIP, 0, 4);
   } else {
      cso_draw_arrays_instanced(cso, PIPE_PRIM_TRIANGLE_STRIP,
                                0, 4, 0, addr->depth);
   }

   return true;
}

void *
st_pbo_create_vs(struct st_context *st)
{
   const struct glsl_type *vec4 = glsl_vec4_type();
   const nir_shader_compiler_options *options =
      st_get_nir_compiler_options(st, MESA_SHADER_VERTEX);

   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_VERTEX, options,
                                                  "st/pbo VS");

   nir_variable *in_pos = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "in_pos");
   in_pos->data.location = VERT_ATTRIB_POS;

   nir_variable *out_pos = nir_variable_create(b.shader, nir_var_shader_out,
                                               vec4, "out_pos");
   out_pos->data.location = VARYING_SLOT_POS;
   out_pos->data.interpolation = INTERP_MODE_NONE;

   nir_copy_var(&b, out_pos, in_pos);

   if (st->pbo.layers) {
      nir_variable *instance_id = nir_variable_create(b.shader,
                                                      nir_var_system_value,
                                                      glsl_int_type(),
                                                      "instance_id");
      instance_id->data.location = SYSTEM_VALUE_INSTANCE_ID;

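      /* When a geometry shader is needed (no VS layer output support), the
       * instance id is passed through the position's .z component; the
       * geometry shader created in st_pbo_create_gs() converts it back into
       * the layer output. Otherwise the VS writes gl_Layer directly.
       */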
      if (st->pbo.use_gs) {
         unsigned swiz_x[4] = {0, 0, 0, 0};
         nir_store_var(&b, out_pos,
                       nir_swizzle(&b, nir_i2f32(&b, nir_load_var(&b, instance_id)), swiz_x, 4),
                       (1 << 2));
      } else {
         nir_variable *out_layer = nir_variable_create(b.shader,
                                                       nir_var_shader_out,
                                                       glsl_int_type(),
                                                       "out_layer");
         out_layer->data.location = VARYING_SLOT_LAYER;
         out_layer->data.interpolation = INTERP_MODE_NONE;
         nir_copy_var(&b, out_layer, instance_id);
      }
   }

   return st_nir_finish_builtin_shader(st, b.shader);
}

void *
st_pbo_create_gs(struct st_context *st)
{
   static const int zero = 0;
   struct ureg_program *ureg;
   struct ureg_dst out_pos;
   struct ureg_dst out_layer;
   struct ureg_src in_pos;
   struct ureg_src imm;
   unsigned i;

   ureg = ureg_create(PIPE_SHADER_GEOMETRY);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM, PIPE_PRIM_TRIANGLES);
   ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM, PIPE_PRIM_TRIANGLE_STRIP);
   ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES, 3);

   out_pos = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   out_layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

   in_pos = ureg_DECL_input(ureg, TGSI_SEMANTIC_POSITION, 0, 0, 1);

   imm = ureg_DECL_immediate_int(ureg, &zero, 1);

   for (i = 0; i < 3; ++i) {
      struct ureg_src in_pos_vertex = ureg_src_dimension(in_pos, i);

      /* out_pos = in_pos[i] */
      ureg_MOV(ureg, out_pos, in_pos_vertex);

      /* out_layer.x = f2i(in_pos[i].z) */
      ureg_F2I(ureg, ureg_writemask(out_layer, TGSI_WRITEMASK_X),
               ureg_scalar(in_pos_vertex, TGSI_SWIZZLE_Z));

      ureg_EMIT(ureg, ureg_scalar(imm, TGSI_SWIZZLE_X));
   }

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, st->pipe);
}

const struct glsl_type *
st_pbo_sampler_type_for_target(enum pipe_texture_target target,
                               enum st_pbo_conversion conv)
{
   bool is_array = target >= PIPE_TEXTURE_1D_ARRAY;
   static const enum glsl_sampler_dim dim[] = {
      [PIPE_BUFFER] = GLSL_SAMPLER_DIM_BUF,
      [PIPE_TEXTURE_1D] = GLSL_SAMPLER_DIM_1D,
      [PIPE_TEXTURE_2D] = GLSL_SAMPLER_DIM_2D,
      [PIPE_TEXTURE_3D] = GLSL_SAMPLER_DIM_3D,
      [PIPE_TEXTURE_CUBE] = GLSL_SAMPLER_DIM_CUBE,
      [PIPE_TEXTURE_RECT] = GLSL_SAMPLER_DIM_RECT,
      [PIPE_TEXTURE_1D_ARRAY] = GLSL_SAMPLER_DIM_1D,
      [PIPE_TEXTURE_2D_ARRAY] = GLSL_SAMPLER_DIM_2D,
      [PIPE_TEXTURE_CUBE_ARRAY] = GLSL_SAMPLER_DIM_CUBE,
   };

   static const enum glsl_base_type type[] = {
      [ST_PBO_CONVERT_FLOAT] = GLSL_TYPE_FLOAT,
      [ST_PBO_CONVERT_UINT] = GLSL_TYPE_UINT,
      [ST_PBO_CONVERT_UINT_TO_SINT] = GLSL_TYPE_UINT,
      [ST_PBO_CONVERT_SINT] = GLSL_TYPE_INT,
      [ST_PBO_CONVERT_SINT_TO_UINT] = GLSL_TYPE_INT,
   };

   return glsl_sampler_type(dim[target], false, is_array, type[conv]);
}


static void *
create_fs(struct st_context *st, bool download,
          enum pipe_texture_target target,
          enum st_pbo_conversion conversion,
          enum pipe_format format,
          bool need_layer)
{
   struct pipe_screen *screen = st->screen;
   const nir_shader_compiler_options *options =
      st_get_nir_compiler_options(st, MESA_SHADER_FRAGMENT);
   bool pos_is_sysval =
      screen->get_param(screen, PIPE_CAP_FS_POSITION_IS_SYSVAL);

   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, options,
                                                  download ?
                                                  "st/pbo download FS" :
                                                  "st/pbo upload FS");

   nir_ssa_def *zero = nir_imm_int(&b, 0);

   /* param = [ -xoffset + skip_pixels, -yoffset, stride, image_size ] */
   nir_variable *param_var =
      nir_variable_create(b.shader, nir_var_uniform, glsl_vec4_type(), "param");
   b.shader->num_uniforms += 4;
   nir_ssa_def *param = nir_load_var(&b, param_var);

   nir_variable *fragcoord =
      nir_variable_create(b.shader, pos_is_sysval ? nir_var_system_value :
                          nir_var_shader_in, glsl_vec4_type(), "gl_FragCoord");
   fragcoord->data.location = pos_is_sysval ? SYSTEM_VALUE_FRAG_COORD
                                            : VARYING_SLOT_POS;
   nir_ssa_def *coord = nir_load_var(&b, fragcoord);

   /* When st->pbo.layers == false, we are guaranteed to deal with only a
    * single layer. We still need the "layer" variable to supply the "array"
    * coordinate for array textures, so it is set to zero whenever an array
    * texture is used but only a single layer is required.
    */
   nir_ssa_def *layer = NULL;
   if (!download || target == PIPE_TEXTURE_1D_ARRAY ||
                    target == PIPE_TEXTURE_2D_ARRAY ||
                    target == PIPE_TEXTURE_3D ||
                    target == PIPE_TEXTURE_CUBE ||
                    target == PIPE_TEXTURE_CUBE_ARRAY) {
      if (need_layer) {
         assert(st->pbo.layers);
         nir_variable *var = nir_variable_create(b.shader, nir_var_shader_in,
                                                 glsl_int_type(), "gl_Layer");
         var->data.location = VARYING_SLOT_LAYER;
         var->data.interpolation = INTERP_MODE_FLAT;
         layer = nir_load_var(&b, var);
      }
      else {
         layer = zero;
      }
   }

   /* offset_pos = param.xy + f2i(coord.xy) */
   nir_ssa_def *offset_pos =
      nir_iadd(&b, nir_channels(&b, param, TGSI_WRITEMASK_XY),
               nir_f2i32(&b, nir_channels(&b, coord, TGSI_WRITEMASK_XY)));

   /* addr = offset_pos.x + offset_pos.y * stride */
   nir_ssa_def *pbo_addr =
      nir_iadd(&b, nir_channel(&b, offset_pos, 0),
               nir_imul(&b, nir_channel(&b, offset_pos, 1),
                        nir_channel(&b, param, 2)));
   if (layer && layer != zero) {
      /* pbo_addr += layer * image_size */
      pbo_addr = nir_iadd(&b, pbo_addr,
                          nir_imul(&b, layer, nir_channel(&b, param, 3)));
   }

   nir_ssa_def *texcoord;
   if (download) {
      texcoord = nir_f2i32(&b, nir_channels(&b, coord, TGSI_WRITEMASK_XY));

      if (target == PIPE_TEXTURE_1D) {
         unsigned sw = 0;
         texcoord = nir_swizzle(&b, texcoord, &sw, 1);
      }

      if (layer) {
         nir_ssa_def *src_layer = layer;

         if (target == PIPE_TEXTURE_3D) {
            nir_variable *layer_offset_var =
               nir_variable_create(b.shader, nir_var_uniform,
                                   glsl_int_type(), "layer_offset");
            b.shader->num_uniforms += 1;
            layer_offset_var->data.driver_location = 4;
            nir_ssa_def *layer_offset = nir_load_var(&b, layer_offset_var);

            src_layer = nir_iadd(&b, layer, layer_offset);
         }

         if (target == PIPE_TEXTURE_1D_ARRAY) {
            texcoord = nir_vec2(&b, nir_channel(&b, texcoord, 0),
                                src_layer);
         } else {
            texcoord = nir_vec3(&b, nir_channel(&b, texcoord, 0),
                                nir_channel(&b, texcoord, 1),
                                src_layer);
         }
      }
   } else {
      texcoord = pbo_addr;
   }

   nir_variable *tex_var =
      nir_variable_create(b.shader, nir_var_uniform,
                          st_pbo_sampler_type_for_target(target, conversion),
                          "tex");
   tex_var->data.explicit_binding = true;
   tex_var->data.binding = 0;

   nir_deref_instr *tex_deref = nir_build_deref_var(&b, tex_var);

   nir_tex_instr *tex = nir_tex_instr_create(b.shader, 3);
   tex->op = nir_texop_txf;
   tex->sampler_dim = glsl_get_sampler_dim(tex_var->type);
   tex->coord_components =
      glsl_get_sampler_coordinate_components(tex_var->type);
   tex->is_array = target >= PIPE_TEXTURE_1D_ARRAY;

   tex->dest_type = nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(tex_var->type));
   tex->src[0].src_type = nir_tex_src_texture_deref;
   tex->src[0].src = nir_src_for_ssa(&tex_deref->dest.ssa);
   tex->src[1].src_type = nir_tex_src_sampler_deref;
   tex->src[1].src = nir_src_for_ssa(&tex_deref->dest.ssa);
   tex->src[2].src_type = nir_tex_src_coord;
   tex->src[2].src = nir_src_for_ssa(texcoord);
   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
   nir_builder_instr_insert(&b, &tex->instr);
   nir_ssa_def *result = &tex->dest.ssa;

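   /* For the pure-integer conversions, clamp values that the destination
    * type cannot represent: negative values are clamped to 0 for
    * SINT -> UINT, and values above INT32_MAX are clamped for UINT -> SINT.
    */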
   if (conversion == ST_PBO_CONVERT_SINT_TO_UINT)
      result = nir_imax(&b, result, zero);
   else if (conversion == ST_PBO_CONVERT_UINT_TO_SINT)
      result = nir_umin(&b, result, nir_imm_int(&b, (1u << 31) - 1));

   if (download) {
      static const enum glsl_base_type type[] = {
         [ST_PBO_CONVERT_FLOAT] = GLSL_TYPE_FLOAT,
         [ST_PBO_CONVERT_UINT] = GLSL_TYPE_UINT,
         [ST_PBO_CONVERT_UINT_TO_SINT] = GLSL_TYPE_INT,
         [ST_PBO_CONVERT_SINT] = GLSL_TYPE_INT,
         [ST_PBO_CONVERT_SINT_TO_UINT] = GLSL_TYPE_UINT,
      };
      nir_variable *img_var =
         nir_variable_create(b.shader, nir_var_image,
                             glsl_image_type(GLSL_SAMPLER_DIM_BUF, false,
                                             type[conversion]), "img");
      img_var->data.access = ACCESS_NON_READABLE;
      img_var->data.explicit_binding = true;
      img_var->data.binding = 0;
      img_var->data.image.format = format;
      nir_deref_instr *img_deref = nir_build_deref_var(&b, img_var);

      nir_image_deref_store(&b, &img_deref->dest.ssa,
                            nir_vec4(&b, pbo_addr, zero, zero, zero),
                            zero,
                            result,
                            nir_imm_int(&b, 0),
                            .image_dim = GLSL_SAMPLER_DIM_BUF);
   } else {
      nir_variable *color =
         nir_variable_create(b.shader, nir_var_shader_out, glsl_vec4_type(),
                             "gl_FragColor");
      color->data.location = FRAG_RESULT_COLOR;

      nir_store_var(&b, color, result, TGSI_WRITEMASK_XYZW);
   }

   return st_nir_finish_builtin_shader(st, b.shader);
}

static enum st_pbo_conversion
get_pbo_conversion(enum pipe_format src_format, enum pipe_format dst_format)
{
   if (util_format_is_pure_uint(src_format)) {
      if (util_format_is_pure_uint(dst_format))
         return ST_PBO_CONVERT_UINT;
      if (util_format_is_pure_sint(dst_format))
         return ST_PBO_CONVERT_UINT_TO_SINT;
   } else if (util_format_is_pure_sint(src_format)) {
      if (util_format_is_pure_sint(dst_format))
         return ST_PBO_CONVERT_SINT;
      if (util_format_is_pure_uint(dst_format))
         return ST_PBO_CONVERT_SINT_TO_UINT;
   }

   return ST_PBO_CONVERT_FLOAT;
}

void *
st_pbo_get_upload_fs(struct st_context *st,
                     enum pipe_format src_format,
                     enum pipe_format dst_format,
                     bool need_layer)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.upload_fs) == ST_NUM_PBO_CONVERSIONS);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.upload_fs[conversion][need_layer])
      st->pbo.upload_fs[conversion][need_layer] =
         create_fs(st, false, 0, conversion, PIPE_FORMAT_NONE, need_layer);

   return st->pbo.upload_fs[conversion][need_layer];
}

void *
st_pbo_get_download_fs(struct st_context *st, enum pipe_texture_target target,
                       enum pipe_format src_format,
                       enum pipe_format dst_format,
                       bool need_layer)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.download_fs) == ST_NUM_PBO_CONVERSIONS);
   assert(target < PIPE_MAX_TEXTURE_TYPES);

   struct pipe_screen *screen = st->screen;
   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);
   bool formatless_store = screen->get_param(screen, PIPE_CAP_IMAGE_STORE_FORMATTED);

   /* For drivers that don't support formatless image stores, the download FS
    * is kept in a dynamically allocated array indexed by the store format.
    */
   if (!formatless_store && !st->pbo.download_fs[conversion][target][need_layer])
      st->pbo.download_fs[conversion][target][need_layer] =
         calloc(sizeof(void *), PIPE_FORMAT_COUNT);

   if (formatless_store) {
      if (!st->pbo.download_fs[conversion][target][need_layer])
         st->pbo.download_fs[conversion][target][need_layer] =
            create_fs(st, true, target, conversion, PIPE_FORMAT_NONE, need_layer);
      return st->pbo.download_fs[conversion][target][need_layer];
   } else {
      void **fs_array = (void **)st->pbo.download_fs[conversion][target][need_layer];
      if (!fs_array[dst_format])
         fs_array[dst_format] =
            create_fs(st, true, target, conversion, dst_format, need_layer);
      return fs_array[dst_format];
   }
}

void
st_init_pbo_helpers(struct st_context *st)
{
   struct pipe_screen *screen = st->screen;

   st->pbo.upload_enabled =
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OBJECTS) &&
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT) >= 1 &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_INTEGERS);
   if (!st->pbo.upload_enabled)
      return;

   st->pbo.download_enabled =
      st->pbo.upload_enabled &&
      screen->get_param(screen, PIPE_CAP_SAMPLER_VIEW_TARGET) &&
      screen->get_param(screen, PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT) &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
                               PIPE_SHADER_CAP_MAX_SHADER_IMAGES) >= 1;

   st->pbo.rgba_only =
      screen->get_param(screen, PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY);

   if (screen->get_param(screen, PIPE_CAP_VS_INSTANCEID)) {
      if (screen->get_param(screen, PIPE_CAP_VS_LAYER_VIEWPORT)) {
         st->pbo.layers = true;
      } else if (screen->get_param(screen, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES) >= 3 &&
                 screen->get_shader_param(screen, PIPE_SHADER_GEOMETRY,
                                          PIPE_SHADER_CAP_PREFERRED_IR) != PIPE_SHADER_IR_NIR) {
         /* As the download GS is created in TGSI, and TGSI to NIR translation
          * is not implemented for GS, avoid using GS for drivers preferring
          * NIR shaders.
          */
         st->pbo.layers = true;
         st->pbo.use_gs = true;
      }
   }

   /* Blend state */
   memset(&st->pbo.upload_blend, 0, sizeof(struct pipe_blend_state));
   st->pbo.upload_blend.rt[0].colormask = PIPE_MASK_RGBA;

   /* Rasterizer state */
   memset(&st->pbo.raster, 0, sizeof(struct pipe_rasterizer_state));
   st->pbo.raster.half_pixel_center = 1;

   if (st->allow_compute_based_texture_transfer)
      st->pbo.shaders = _mesa_hash_table_create_u32_keys(NULL);
}

void
st_destroy_pbo_helpers(struct st_context *st)
{
   struct pipe_screen *screen = st->screen;
   bool formatless_store = screen->get_param(screen, PIPE_CAP_IMAGE_STORE_FORMATTED);
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(st->pbo.upload_fs); ++i) {
      for (unsigned j = 0; j < ARRAY_SIZE(st->pbo.upload_fs[0]); j++) {
         if (st->pbo.upload_fs[i][j]) {
            st->pipe->delete_fs_state(st->pipe, st->pbo.upload_fs[i][j]);
            st->pbo.upload_fs[i][j] = NULL;
         }
      }
   }

   for (i = 0; i < ARRAY_SIZE(st->pbo.download_fs); ++i) {
      for (unsigned j = 0; j < ARRAY_SIZE(st->pbo.download_fs[0]); ++j) {
         for (unsigned k = 0; k < ARRAY_SIZE(st->pbo.download_fs[0][0]); k++) {
            if (st->pbo.download_fs[i][j][k]) {
               if (formatless_store) {
                  st->pipe->delete_fs_state(st->pipe, st->pbo.download_fs[i][j][k]);
               } else {
                  void **fs_array = (void **)st->pbo.download_fs[i][j][k];
                  for (unsigned l = 0; l < PIPE_FORMAT_COUNT; l++)
                     if (fs_array[l])
                        st->pipe->delete_fs_state(st->pipe, fs_array[l]);
                  free(st->pbo.download_fs[i][j][k]);
               }
               st->pbo.download_fs[i][j][k] = NULL;
            }
         }
      }
   }

   if (st->pbo.gs) {
      st->pipe->delete_gs_state(st->pipe, st->pbo.gs);
      st->pbo.gs = NULL;
   }

   if (st->pbo.vs) {
      st->pipe->delete_vs_state(st->pipe, st->pbo.vs);
      st->pbo.vs = NULL;
   }

   if (st->pbo.shaders) {
      hash_table_foreach(st->pbo.shaders, entry)
         st->pipe->delete_compute_state(st->pipe, entry->data);
      _mesa_hash_table_destroy(st->pbo.shaders, NULL);
   }
}