/*
 * Copyright 2007 VMware, Inc.
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 *
 * Common helper functions for PBO up- and downloads.
 */

#include "state_tracker/st_context.h"
#include "state_tracker/st_pbo.h"
#include "state_tracker/st_cb_bufferobjects.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "cso_cache/cso_context.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"

/* Conversion to apply in the fragment shader. */
enum st_pbo_conversion {
   ST_PBO_CONVERT_NONE = 0,
   ST_PBO_CONVERT_UINT_TO_SINT,
   ST_PBO_CONVERT_SINT_TO_UINT,

   ST_NUM_PBO_CONVERSIONS
};

/* Final setup of buffer addressing information.
 *
 * buf_offset is in pixels.
 *
 * Returns false if something (e.g. alignment) prevents PBO upload/download.
 */
bool
st_pbo_addresses_setup(struct st_context *st,
                       struct pipe_resource *buf, intptr_t buf_offset,
                       struct st_pbo_addresses *addr)
{
   unsigned skip_pixels;

   /* Check alignment against texture buffer requirements. */
   {
      unsigned ofs = (buf_offset * addr->bytes_per_pixel) % st->ctx->Const.TextureBufferOffsetAlignment;
      if (ofs != 0) {
         if (ofs % addr->bytes_per_pixel != 0)
            return false;

         skip_pixels = ofs / addr->bytes_per_pixel;
         buf_offset -= skip_pixels;
      } else {
         skip_pixels = 0;
      }
   }

   assert(buf_offset >= 0);

   addr->buffer = buf;
   addr->first_element = buf_offset;
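   /* The last texel addressed is the final pixel of the last row of the
    * last image slice, offset by skip_pixels.
    */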
   addr->last_element = buf_offset + skip_pixels + addr->width - 1
      + (addr->height - 1 + (addr->depth - 1) * addr->image_height) * addr->pixels_per_row;

   if (addr->last_element - addr->first_element > st->ctx->Const.MaxTextureBufferSize - 1)
      return false;

   /* This should be ensured by Mesa before calling our callbacks */
   assert((addr->last_element + 1) * addr->bytes_per_pixel <= buf->width0);

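   /* These values feed the fragment shader's constant buffer; see create_fs(). */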
   addr->constants.xoffset = -addr->xoffset + skip_pixels;
   addr->constants.yoffset = -addr->yoffset;
   addr->constants.stride = addr->pixels_per_row;
   addr->constants.image_size = addr->pixels_per_row * addr->image_height;
   addr->constants.layer_offset = 0;

   return true;
}

/* Validate and fill buffer addressing information based on GL pixelstore
 * attributes.
 *
 * Returns false if some aspect of the addressing (e.g. alignment) prevents
 * PBO upload/download.
 */
bool
st_pbo_addresses_pixelstore(struct st_context *st,
                            GLenum gl_target, bool skip_images,
                            const struct gl_pixelstore_attrib *store,
                            const void *pixels,
                            struct st_pbo_addresses *addr)
{
   struct pipe_resource *buf = st_buffer_object(store->BufferObj)->buffer;
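   /* With a pixel buffer object bound, the "pixels" argument is a byte
    * offset into that buffer rather than a client pointer.
    */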
   intptr_t buf_offset = (intptr_t) pixels;

   if (buf_offset % addr->bytes_per_pixel)
      return false;

   /* Convert to texels */
   buf_offset = buf_offset / addr->bytes_per_pixel;

   /* Determine image height */
   if (gl_target == GL_TEXTURE_1D_ARRAY) {
      addr->image_height = 1;
   } else {
      addr->image_height = store->ImageHeight > 0 ? store->ImageHeight : addr->height;
   }

   /* Compute the stride, taking store->Alignment into account */
   {
      unsigned pixels_per_row = store->RowLength > 0 ?
                                store->RowLength : addr->width;
      unsigned bytes_per_row = pixels_per_row * addr->bytes_per_pixel;
      unsigned remainder = bytes_per_row % store->Alignment;
      unsigned offset_rows;

      if (remainder > 0)
         bytes_per_row += store->Alignment - remainder;

      if (bytes_per_row % addr->bytes_per_pixel)
         return false;

      addr->pixels_per_row = bytes_per_row / addr->bytes_per_pixel;

      offset_rows = store->SkipRows;
      if (skip_images)
         offset_rows += addr->image_height * store->SkipImages;

      buf_offset += store->SkipPixels + addr->pixels_per_row * offset_rows;
   }

   if (!st_pbo_addresses_setup(st, buf, buf_offset, addr))
      return false;

   /* Support GL_PACK_INVERT_MESA */
   if (store->Invert) {
      addr->constants.xoffset += (addr->height - 1) * addr->constants.stride;
      addr->constants.stride = -addr->constants.stride;
   }

   return true;
}

/* For download from a framebuffer, we may have to invert the Y axis. The
 * setup is as follows:
 * - set viewport to inverted, so that the position sysval is correct for
 *   texel fetches
 * - this function adjusts the fragment shader's constant buffer to compute
 *   the correct destination addresses.
 */
void
st_pbo_addresses_invert_y(struct st_pbo_addresses *addr,
                          unsigned viewport_height)
{
   addr->constants.xoffset +=
      (viewport_height - 1 + 2 * addr->constants.yoffset) * addr->constants.stride;
   addr->constants.stride = -addr->constants.stride;
}

/* Set up all vertex pipeline state, rasterizer state, and fragment shader
 * constants, and issue the draw call for PBO upload/download.
 *
 * The caller is responsible for saving and restoring state, as well as for
 * setting other fragment shader state (fragment shader, samplers), and
 * framebuffer/viewport/DSA/blend state.
 */
bool
st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
            unsigned surface_width, unsigned surface_height)
{
   struct cso_context *cso = st->cso_context;

   /* Setup vertex and geometry shaders */
   if (!st->pbo.vs) {
      st->pbo.vs = st_pbo_create_vs(st);
      if (!st->pbo.vs)
         return false;
   }

   if (addr->depth != 1 && st->pbo.use_gs && !st->pbo.gs) {
      st->pbo.gs = st_pbo_create_gs(st);
      if (!st->pbo.gs)
         return false;
   }

   cso_set_vertex_shader_handle(cso, st->pbo.vs);

   cso_set_geometry_shader_handle(cso, addr->depth != 1 ? st->pbo.gs : NULL);

   cso_set_tessctrl_shader_handle(cso, NULL);

   cso_set_tesseval_shader_handle(cso, NULL);

   /* Upload vertices */
   {
      struct pipe_vertex_buffer vbo = {0};
      struct pipe_vertex_element velem;

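      /* Quad corners in normalized device coordinates, covering the
       * destination rectangle on the surface.
       */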
      float x0 = (float) addr->xoffset / surface_width * 2.0f - 1.0f;
      float y0 = (float) addr->yoffset / surface_height * 2.0f - 1.0f;
      float x1 = (float) (addr->xoffset + addr->width) / surface_width * 2.0f - 1.0f;
      float y1 = (float) (addr->yoffset + addr->height) / surface_height * 2.0f - 1.0f;

      float *verts = NULL;

      vbo.stride = 2 * sizeof(float);

      u_upload_alloc(st->pipe->stream_uploader, 0, 8 * sizeof(float), 4,
                     &vbo.buffer_offset, &vbo.buffer.resource, (void **) &verts);
      if (!verts)
         return false;

      verts[0] = x0;
      verts[1] = y0;
      verts[2] = x0;
      verts[3] = y1;
      verts[4] = x1;
      verts[5] = y0;
      verts[6] = x1;
      verts[7] = y1;

      u_upload_unmap(st->pipe->stream_uploader);

      velem.src_offset = 0;
      velem.instance_divisor = 0;
      velem.vertex_buffer_index = cso_get_aux_vertex_buffer_slot(cso);
      velem.src_format = PIPE_FORMAT_R32G32_FLOAT;

      cso_set_vertex_elements(cso, 1, &velem);

      cso_set_vertex_buffers(cso, velem.vertex_buffer_index, 1, &vbo);

      pipe_resource_reference(&vbo.buffer.resource, NULL);
   }

   /* Upload constants */
   {
      struct pipe_constant_buffer cb;

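      /* These constants are read by the PBO fragment shaders through
       * constant buffer slot 0 (see create_fs()).
       */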
      cb.buffer = NULL;
      cb.user_buffer = &addr->constants;
      cb.buffer_offset = 0;
      cb.buffer_size = sizeof(addr->constants);

      cso_set_constant_buffer(cso, PIPE_SHADER_FRAGMENT, 0, &cb);

      pipe_resource_reference(&cb.buffer, NULL);
   }

   /* Rasterizer state */
   cso_set_rasterizer(cso, &st->pbo.raster);

   /* Disable stream output */
   cso_set_stream_outputs(cso, 0, NULL, 0);

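   /* Layered transfers draw one instance per image slice. */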
   if (addr->depth == 1) {
      cso_draw_arrays(cso, PIPE_PRIM_TRIANGLE_STRIP, 0, 4);
   } else {
      cso_draw_arrays_instanced(cso, PIPE_PRIM_TRIANGLE_STRIP,
                                0, 4, 0, addr->depth);
   }

   return true;
}

void *
st_pbo_create_vs(struct st_context *st)
{
   struct ureg_program *ureg;
   struct ureg_src in_pos;
   struct ureg_src in_instanceid;
   struct ureg_dst out_pos;
   struct ureg_dst out_layer;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   in_pos = ureg_DECL_vs_input(ureg, TGSI_SEMANTIC_POSITION);

   out_pos = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);

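   /* For layered destinations, one instance is drawn per slice and the
    * instance ID selects the target layer, either directly or via the
    * pass-through geometry shader.
    */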
   if (st->pbo.layers) {
      in_instanceid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);

      if (!st->pbo.use_gs)
         out_layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);
   }

   /* out_pos = in_pos */
   ureg_MOV(ureg, out_pos, in_pos);

   if (st->pbo.layers) {
      if (st->pbo.use_gs) {
         /* out_pos.z = i2f(gl_InstanceID) */
         ureg_I2F(ureg, ureg_writemask(out_pos, TGSI_WRITEMASK_Z),
                  ureg_scalar(in_instanceid, TGSI_SWIZZLE_X));
      } else {
         /* out_layer = gl_InstanceID */
         ureg_MOV(ureg, ureg_writemask(out_layer, TGSI_WRITEMASK_X),
                  ureg_scalar(in_instanceid, TGSI_SWIZZLE_X));
      }
   }

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, st->pipe);
}

void *
st_pbo_create_gs(struct st_context *st)
{
   static const int zero = 0;
   struct ureg_program *ureg;
   struct ureg_dst out_pos;
   struct ureg_dst out_layer;
   struct ureg_src in_pos;
   struct ureg_src imm;
   unsigned i;

   ureg = ureg_create(PIPE_SHADER_GEOMETRY);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM, PIPE_PRIM_TRIANGLES);
   ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM, PIPE_PRIM_TRIANGLE_STRIP);
   ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES, 3);

   out_pos = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   out_layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

   in_pos = ureg_DECL_input(ureg, TGSI_SEMANTIC_POSITION, 0, 0, 1);

   imm = ureg_DECL_immediate_int(ureg, &zero, 1);

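   /* Pass each input triangle through, turning the layer index that the
    * vertex shader stored in position.z back into the gl_Layer output.
    */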
   for (i = 0; i < 3; ++i) {
      struct ureg_src in_pos_vertex = ureg_src_dimension(in_pos, i);

      /* out_pos = in_pos[i] */
      ureg_MOV(ureg, out_pos, in_pos_vertex);

      /* out_layer.x = f2i(in_pos[i].z) */
      ureg_F2I(ureg, ureg_writemask(out_layer, TGSI_WRITEMASK_X),
               ureg_scalar(in_pos_vertex, TGSI_SWIZZLE_Z));

      ureg_EMIT(ureg, ureg_scalar(imm, TGSI_SWIZZLE_X));
   }

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, st->pipe);
}

static void
build_conversion(struct ureg_program *ureg, const struct ureg_dst *temp,
                 enum st_pbo_conversion conversion)
{
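   /* When reinterpreting between pure signed and unsigned integer formats,
    * clamp values to the destination type's representable range.
    */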
   switch (conversion) {
   case ST_PBO_CONVERT_SINT_TO_UINT:
      ureg_IMAX(ureg, *temp, ureg_src(*temp), ureg_imm1i(ureg, 0));
      break;
   case ST_PBO_CONVERT_UINT_TO_SINT:
      ureg_UMIN(ureg, *temp, ureg_src(*temp), ureg_imm1u(ureg, (1u << 31) - 1));
      break;
   default:
      /* no-op */
      break;
   }
}

static void *
create_fs(struct st_context *st, bool download, enum pipe_texture_target target,
          enum st_pbo_conversion conversion)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct ureg_program *ureg;
   bool have_layer;
   struct ureg_dst out;
   struct ureg_src sampler;
   struct ureg_src pos;
   struct ureg_src layer;
   struct ureg_src const0;
   struct ureg_src const1;
   struct ureg_dst temp0;

   have_layer =
      st->pbo.layers &&
      (!download || target == PIPE_TEXTURE_1D_ARRAY
                 || target == PIPE_TEXTURE_2D_ARRAY
                 || target == PIPE_TEXTURE_3D
                 || target == PIPE_TEXTURE_CUBE
                 || target == PIPE_TEXTURE_CUBE_ARRAY);

   ureg = ureg_create(PIPE_SHADER_FRAGMENT);
   if (!ureg)
      return NULL;

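   /* Uploads read texels from a buffer texture and write them to the color
    * output; downloads fetch from the source texture and store the result
    * into a buffer image, so no color output is needed.
    */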
   if (!download) {
      out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
   } else {
      struct ureg_src image;

      /* writeonly images do not require an explicitly given format. */
      image = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE,
                              true, false);
      out = ureg_dst(image);
   }

   sampler = ureg_DECL_sampler(ureg, 0);
   if (screen->get_param(screen, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL)) {
      pos = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_POSITION, 0);
   } else {
      pos = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_POSITION, 0,
                               TGSI_INTERPOLATE_LINEAR);
   }
   if (have_layer) {
      layer = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_LAYER, 0,
                                 TGSI_INTERPOLATE_CONSTANT);
   }
   const0 = ureg_DECL_constant(ureg, 0);
   const1 = ureg_DECL_constant(ureg, 1);
   temp0 = ureg_DECL_temporary(ureg);

   /* Note: const0 = [ -xoffset + skip_pixels, -yoffset, stride, image_size ] */

   /* temp0.xy = f2i(pos.xy) */
   ureg_F2I(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY),
            ureg_swizzle(pos,
                         TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                         TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y));

   /* temp0.xy = temp0.xy + const0.xy */
   ureg_UADD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY),
             ureg_swizzle(ureg_src(temp0),
                          TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                          TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y),
             ureg_swizzle(const0,
                          TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                          TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y));

   /* temp0.x = const0.z * temp0.y + temp0.x */
   ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X),
             ureg_scalar(const0, TGSI_SWIZZLE_Z),
             ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_Y),
             ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X));

   if (have_layer) {
      /* temp0.x = const0.w * layer + temp0.x */
      ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X),
                ureg_scalar(const0, TGSI_SWIZZLE_W),
                ureg_scalar(layer, TGSI_SWIZZLE_X),
                ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X));
   }

   /* temp0.w = 0 */
   ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_W), ureg_imm1u(ureg, 0));

   if (download) {
      struct ureg_dst temp1;
      struct ureg_src op[2];

      temp1 = ureg_DECL_temporary(ureg);

      /* temp1.xy = f2i(pos.xy) */
      ureg_F2I(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_XY), pos);

      /* temp1.zw = 0 */
      ureg_MOV(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_ZW), ureg_imm1u(ureg, 0));

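      /* Source texel coordinates: the layer index goes into .y for 1D array
       * textures and into .z for all other layered targets.
       */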
      if (have_layer) {
         struct ureg_dst temp1_layer =
            ureg_writemask(temp1, target == PIPE_TEXTURE_1D_ARRAY ? TGSI_WRITEMASK_Y
                                                                  : TGSI_WRITEMASK_Z);

         /* temp1.y/z = layer */
         ureg_MOV(ureg, temp1_layer, ureg_scalar(layer, TGSI_SWIZZLE_X));

         if (target == PIPE_TEXTURE_3D) {
            /* temp1.z += layer_offset */
            ureg_UADD(ureg, temp1_layer,
                      ureg_scalar(ureg_src(temp1), TGSI_SWIZZLE_Z),
                      ureg_scalar(const1, TGSI_SWIZZLE_X));
         }
      }

      /* temp1 = txf(sampler, temp1) */
      ureg_TXF(ureg, temp1, util_pipe_tex_to_tgsi_tex(target, 1),
               ureg_src(temp1), sampler);

      build_conversion(ureg, &temp1, conversion);

      /* store(out, temp0, temp1) */
      op[0] = ureg_src(temp0);
      op[1] = ureg_src(temp1);
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &out, 1, op, 2, 0,
                       TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE);

      ureg_release_temporary(ureg, temp1);
   } else {
      /* temp0 = txf(sampler, temp0.x) */
      ureg_TXF(ureg, temp0, TGSI_TEXTURE_BUFFER, ureg_src(temp0), sampler);

      build_conversion(ureg, &temp0, conversion);

      ureg_MOV(ureg, out, ureg_src(temp0));
   }

   ureg_release_temporary(ureg, temp0);

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, pipe);
}

static enum st_pbo_conversion
get_pbo_conversion(enum pipe_format src_format, enum pipe_format dst_format)
{
   if (util_format_is_pure_uint(src_format)) {
      if (util_format_is_pure_sint(dst_format))
         return ST_PBO_CONVERT_UINT_TO_SINT;
   } else if (util_format_is_pure_sint(src_format)) {
      if (util_format_is_pure_uint(dst_format))
         return ST_PBO_CONVERT_SINT_TO_UINT;
   }

   return ST_PBO_CONVERT_NONE;
}

void *
st_pbo_get_upload_fs(struct st_context *st,
                     enum pipe_format src_format,
                     enum pipe_format dst_format)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.upload_fs) == ST_NUM_PBO_CONVERSIONS);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.upload_fs[conversion])
      st->pbo.upload_fs[conversion] = create_fs(st, false, 0, conversion);

   return st->pbo.upload_fs[conversion];
}

void *
st_pbo_get_download_fs(struct st_context *st, enum pipe_texture_target target,
                       enum pipe_format src_format,
                       enum pipe_format dst_format)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.download_fs) == ST_NUM_PBO_CONVERSIONS);
   assert(target < PIPE_MAX_TEXTURE_TYPES);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.download_fs[conversion][target])
      st->pbo.download_fs[conversion][target] = create_fs(st, true, target, conversion);

   return st->pbo.download_fs[conversion][target];
}

void
st_init_pbo_helpers(struct st_context *st)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;

   st->pbo.upload_enabled =
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OBJECTS) &&
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT) >= 1 &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_INTEGERS);
   if (!st->pbo.upload_enabled)
      return;

   st->pbo.download_enabled =
      st->pbo.upload_enabled &&
      screen->get_param(screen, PIPE_CAP_SAMPLER_VIEW_TARGET) &&
      screen->get_param(screen, PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT) &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
                               PIPE_SHADER_CAP_MAX_SHADER_IMAGES) >= 1;

   st->pbo.rgba_only =
      screen->get_param(screen, PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY);

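   /* Prefer exporting gl_Layer directly from the vertex shader; fall back to
    * a pass-through geometry shader when only the GS can write the layer.
    */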
   if (screen->get_param(screen, PIPE_CAP_TGSI_INSTANCEID)) {
      if (screen->get_param(screen, PIPE_CAP_TGSI_VS_LAYER_VIEWPORT)) {
         st->pbo.layers = true;
      } else if (screen->get_param(screen, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES) >= 3) {
         st->pbo.layers = true;
         st->pbo.use_gs = true;
      }
   }

   /* Blend state */
   memset(&st->pbo.upload_blend, 0, sizeof(struct pipe_blend_state));
   st->pbo.upload_blend.rt[0].colormask = PIPE_MASK_RGBA;

   /* Rasterizer state */
   memset(&st->pbo.raster, 0, sizeof(struct pipe_rasterizer_state));
   st->pbo.raster.half_pixel_center = 1;
}

void
st_destroy_pbo_helpers(struct st_context *st)
{
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(st->pbo.upload_fs); ++i) {
      if (st->pbo.upload_fs[i]) {
         cso_delete_fragment_shader(st->cso_context, st->pbo.upload_fs[i]);
         st->pbo.upload_fs[i] = NULL;
      }
   }

   for (i = 0; i < ARRAY_SIZE(st->pbo.download_fs); ++i) {
      for (unsigned j = 0; j < ARRAY_SIZE(st->pbo.download_fs[0]); ++j) {
         if (st->pbo.download_fs[i][j]) {
            cso_delete_fragment_shader(st->cso_context, st->pbo.download_fs[i][j]);
            st->pbo.download_fs[i][j] = NULL;
         }
      }
   }

   if (st->pbo.gs) {
      cso_delete_geometry_shader(st->cso_context, st->pbo.gs);
      st->pbo.gs = NULL;
   }

   if (st->pbo.vs) {
      cso_delete_vertex_shader(st->cso_context, st->pbo.vs);
      st->pbo.vs = NULL;
   }
}