/*
 * Copyright 2007 VMware, Inc.
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 *
 * Common helper functions for PBO up- and downloads.
 */

#include "state_tracker/st_context.h"
#include "state_tracker/st_pbo.h"
#include "state_tracker/st_cb_bufferobjects.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "cso_cache/cso_context.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
/* Conversion to apply in the fragment shader. */
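/* The conversion, selected by get_pbo_conversion() from the source and
 * destination formats, clamps values when copying between pure signed and
 * pure unsigned integer formats (see build_conversion()).
 */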
enum st_pbo_conversion {
   ST_PBO_CONVERT_NONE = 0,
   ST_PBO_CONVERT_UINT_TO_SINT,
   ST_PBO_CONVERT_SINT_TO_UINT,

   ST_NUM_PBO_CONVERSIONS
};

/* Final setup of buffer addressing information.
 *
 * buf_offset is in pixels.
 *
 * Returns false if something (e.g. alignment) prevents PBO upload/download.
 */
bool
st_pbo_addresses_setup(struct st_context *st,
                       struct pipe_resource *buf, intptr_t buf_offset,
                       struct st_pbo_addresses *addr)
{
   unsigned skip_pixels;

   /* Check alignment against texture buffer requirements. */
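   /* The texture buffer binding offset (in bytes) must be a multiple of
    * ctx->Const.TextureBufferOffsetAlignment.  If it is not, rewind
    * buf_offset by skip_pixels whole pixels so that it is, and compensate by
    * skipping those pixels in the shader (constants.xoffset below).  If the
    * misalignment is not a whole number of pixels, bail out.
    */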
   {
      unsigned ofs = (buf_offset * addr->bytes_per_pixel) % st->ctx->Const.TextureBufferOffsetAlignment;
      if (ofs != 0) {
         if (ofs % addr->bytes_per_pixel != 0)
            return false;

         skip_pixels = ofs / addr->bytes_per_pixel;
         buf_offset -= skip_pixels;
      } else {
         skip_pixels = 0;
      }
   }

   assert(buf_offset >= 0);

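   /* first_element/last_element give the texel range (relative to the start
    * of the buffer) that the buffer texture view must cover: last_element is
    * the last texel of the last row of the last image.  The range must fit
    * within ctx->Const.MaxTextureBufferSize.
    */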
   addr->buffer = buf;
   addr->first_element = buf_offset;
   addr->last_element = buf_offset + skip_pixels + addr->width - 1
      + (addr->height - 1 + (addr->depth - 1) * addr->image_height) * addr->pixels_per_row;

   if (addr->last_element - addr->first_element > st->ctx->Const.MaxTextureBufferSize - 1)
      return false;

   /* This should be ensured by Mesa before calling our callbacks */
   assert((addr->last_element + 1) * addr->bytes_per_pixel <= buf->width0);

   addr->constants.xoffset = -addr->xoffset + skip_pixels;
   addr->constants.yoffset = -addr->yoffset;
   addr->constants.stride = addr->pixels_per_row;
   addr->constants.image_size = addr->pixels_per_row * addr->image_height;
   addr->constants.layer_offset = 0;

   return true;
}

/* Validate and fill buffer addressing information based on GL pixelstore
 * attributes.
 *
 * Returns false if some aspect of the addressing (e.g. alignment) prevents
 * PBO upload/download.
 */
bool
st_pbo_addresses_pixelstore(struct st_context *st,
                            GLenum gl_target, bool skip_images,
                            const struct gl_pixelstore_attrib *store,
                            const void *pixels,
                            struct st_pbo_addresses *addr)
{
   struct pipe_resource *buf = st_buffer_object(store->BufferObj)->buffer;
   intptr_t buf_offset = (intptr_t) pixels;

   if (buf_offset % addr->bytes_per_pixel)
      return false;

   /* Convert to texels */
   buf_offset = buf_offset / addr->bytes_per_pixel;

   /* Determine image height */
   if (gl_target == GL_TEXTURE_1D_ARRAY) {
      addr->image_height = 1;
   } else {
      addr->image_height = store->ImageHeight > 0 ? store->ImageHeight : addr->height;
   }

   /* Compute the stride, taking store->Alignment into account */
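   /* The row stride in bytes is the unpadded row size rounded up to the next
    * multiple of store->Alignment; it must also be a whole number of pixels
    * to be expressible as a texel stride.  For example (hypothetical
    * numbers): 5 pixels of 3 bytes each with Alignment 4 gives 15 bytes
    * rounded up to 16, which is not a multiple of 3, so we bail out.
    */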
   {
      unsigned pixels_per_row = store->RowLength > 0 ?
                                store->RowLength : addr->width;
      unsigned bytes_per_row = pixels_per_row * addr->bytes_per_pixel;
      unsigned remainder = bytes_per_row % store->Alignment;
      unsigned offset_rows;

      if (remainder > 0)
         bytes_per_row += store->Alignment - remainder;

      if (bytes_per_row % addr->bytes_per_pixel)
         return false;

      addr->pixels_per_row = bytes_per_row / addr->bytes_per_pixel;

      offset_rows = store->SkipRows;
      if (skip_images)
         offset_rows += addr->image_height * store->SkipImages;

      buf_offset += store->SkipPixels + addr->pixels_per_row * offset_rows;
   }

   if (!st_pbo_addresses_setup(st, buf, buf_offset, addr))
      return false;

   /* Support GL_PACK_INVERT_MESA */
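   /* Flip the image vertically by starting at the last row and walking the
    * rows backwards (negative stride).
    */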
   if (store->Invert) {
      addr->constants.xoffset += (addr->height - 1) * addr->constants.stride;
      addr->constants.stride = -addr->constants.stride;
   }

   return true;
}

/* For download from a framebuffer, we may have to invert the Y axis. The
 * setup is as follows:
 * - set viewport to inverted, so that the position sysval is correct for
 *   texel fetches
 * - this function adjusts the fragment shader's constant buffer to compute
 *   the correct destination addresses.
 */
void
st_pbo_addresses_invert_y(struct st_pbo_addresses *addr,
                          unsigned viewport_height)
{
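   /* The fragment shader computes
    *    element = stride * (y + yoffset) + (x + xoffset).
    * Substituting y -> (viewport_height - 1 - y) gives
    *    element = -stride * (y + yoffset)
    *            + stride * (viewport_height - 1 + 2 * yoffset) + (x + xoffset),
    * hence the adjustment below.
    */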
   addr->constants.xoffset +=
      (viewport_height - 1 + 2 * addr->constants.yoffset) * addr->constants.stride;
   addr->constants.stride = -addr->constants.stride;
}

/* Setup all vertex pipeline state, rasterizer state, and fragment shader
 * constants, and issue the draw call for PBO upload/download.
 *
 * The caller is responsible for saving and restoring state, as well as for
 * setting other fragment shader state (fragment shader, samplers), and
 * framebuffer/viewport/DSA/blend state.
 */
bool
st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
            unsigned surface_width, unsigned surface_height)
{
   struct cso_context *cso = st->cso_context;

   /* Setup vertex and geometry shaders */
   if (!st->pbo.vs) {
      st->pbo.vs = st_pbo_create_vs(st);
      if (!st->pbo.vs)
         return false;
   }

   if (addr->depth != 1 && st->pbo.use_gs && !st->pbo.gs) {
      st->pbo.gs = st_pbo_create_gs(st);
      if (!st->pbo.gs)
         return false;
   }

   cso_set_vertex_shader_handle(cso, st->pbo.vs);

   cso_set_geometry_shader_handle(cso, addr->depth != 1 ? st->pbo.gs : NULL);

   cso_set_tessctrl_shader_handle(cso, NULL);

   cso_set_tesseval_shader_handle(cso, NULL);

   /* Upload vertices */
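   /* Four vertices forming a triangle strip that covers the destination
    * rectangle [xoffset, xoffset + width] x [yoffset, yoffset + height],
    * mapped to normalized device coordinates.
    */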
   {
      struct pipe_vertex_buffer vbo;
      struct pipe_vertex_element velem;

      float x0 = (float) addr->xoffset / surface_width * 2.0f - 1.0f;
      float y0 = (float) addr->yoffset / surface_height * 2.0f - 1.0f;
      float x1 = (float) (addr->xoffset + addr->width) / surface_width * 2.0f - 1.0f;
      float y1 = (float) (addr->yoffset + addr->height) / surface_height * 2.0f - 1.0f;

      float *verts = NULL;

      vbo.user_buffer = NULL;
      vbo.buffer = NULL;
      vbo.stride = 2 * sizeof(float);

      u_upload_alloc(st->uploader, 0, 8 * sizeof(float), 4,
                     &vbo.buffer_offset, &vbo.buffer, (void **) &verts);
      if (!verts)
         return false;

      verts[0] = x0;
      verts[1] = y0;
      verts[2] = x0;
      verts[3] = y1;
      verts[4] = x1;
      verts[5] = y0;
      verts[6] = x1;
      verts[7] = y1;

      u_upload_unmap(st->uploader);

      velem.src_offset = 0;
      velem.instance_divisor = 0;
      velem.vertex_buffer_index = cso_get_aux_vertex_buffer_slot(cso);
      velem.src_format = PIPE_FORMAT_R32G32_FLOAT;

      cso_set_vertex_elements(cso, 1, &velem);

      cso_set_vertex_buffers(cso, velem.vertex_buffer_index, 1, &vbo);

      pipe_resource_reference(&vbo.buffer, NULL);
   }

   /* Upload constants */
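   /* Bind addr->constants (the addressing parameters consumed by the
    * fragment shader, see the const0/const1 notes in create_fs()) as
    * fragment constant buffer 0, uploading it to a real buffer when a
    * constant uploader is available and falling back to a user buffer
    * otherwise.
    */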
   {
      struct pipe_constant_buffer cb;

      if (st->constbuf_uploader) {
         cb.buffer = NULL;
         cb.user_buffer = NULL;
         u_upload_data(st->constbuf_uploader, 0, sizeof(addr->constants),
                       st->ctx->Const.UniformBufferOffsetAlignment,
                       &addr->constants, &cb.buffer_offset, &cb.buffer);
         if (!cb.buffer)
            return false;

         u_upload_unmap(st->constbuf_uploader);
      } else {
         cb.buffer = NULL;
         cb.user_buffer = &addr->constants;
         cb.buffer_offset = 0;
      }
      cb.buffer_size = sizeof(addr->constants);

      cso_set_constant_buffer(cso, PIPE_SHADER_FRAGMENT, 0, &cb);

      pipe_resource_reference(&cb.buffer, NULL);
   }

   /* Rasterizer state */
   cso_set_rasterizer(cso, &st->pbo.raster);

   /* Disable stream output */
   cso_set_stream_outputs(cso, 0, NULL, 0);

   if (addr->depth == 1) {
      cso_draw_arrays(cso, PIPE_PRIM_TRIANGLE_STRIP, 0, 4);
   } else {
      cso_draw_arrays_instanced(cso, PIPE_PRIM_TRIANGLE_STRIP,
                                0, 4, 0, addr->depth);
   }

   return true;
}

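/* Create the passthrough vertex shader for the PBO draw: it copies the input
 * position to the output position and, when layered rendering is in use,
 * forwards gl_InstanceID either directly to the layer output or (when a
 * geometry shader is needed) through pos.z.
 */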
void *
st_pbo_create_vs(struct st_context *st)
{
   struct ureg_program *ureg;
   struct ureg_src in_pos;
   struct ureg_src in_instanceid;
   struct ureg_dst out_pos;
   struct ureg_dst out_layer;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   in_pos = ureg_DECL_vs_input(ureg, TGSI_SEMANTIC_POSITION);

   out_pos = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);

   if (st->pbo.layers) {
      in_instanceid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);

      if (!st->pbo.use_gs)
         out_layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);
   }

   /* out_pos = in_pos */
   ureg_MOV(ureg, out_pos, in_pos);

   if (st->pbo.layers) {
      if (st->pbo.use_gs) {
         /* out_pos.z = i2f(gl_InstanceID) */
         ureg_I2F(ureg, ureg_writemask(out_pos, TGSI_WRITEMASK_Z),
                  ureg_scalar(in_instanceid, TGSI_SWIZZLE_X));
      } else {
         /* out_layer = gl_InstanceID */
         ureg_MOV(ureg, out_layer, in_instanceid);
      }
   }

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, st->pipe);
}

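/* Create the geometry shader used when the vertex shader cannot write the
 * layer output: it passes each triangle through and converts pos.z (the
 * instance id stored there by the vertex shader) into the layer output.
 */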
void *
st_pbo_create_gs(struct st_context *st)
{
   static const int zero = 0;
   struct ureg_program *ureg;
   struct ureg_dst out_pos;
   struct ureg_dst out_layer;
   struct ureg_src in_pos;
   struct ureg_src imm;
   unsigned i;

   ureg = ureg_create(PIPE_SHADER_GEOMETRY);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM, PIPE_PRIM_TRIANGLES);
   ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM, PIPE_PRIM_TRIANGLE_STRIP);
   ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES, 3);

   out_pos = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   out_layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

   in_pos = ureg_DECL_input(ureg, TGSI_SEMANTIC_POSITION, 0, 0, 1);

   imm = ureg_DECL_immediate_int(ureg, &zero, 1);

   for (i = 0; i < 3; ++i) {
      struct ureg_src in_pos_vertex = ureg_src_dimension(in_pos, i);

      /* out_pos = in_pos[i] */
      ureg_MOV(ureg, out_pos, in_pos_vertex);

      /* out_layer.x = f2i(in_pos[i].z) */
      ureg_F2I(ureg, ureg_writemask(out_layer, TGSI_WRITEMASK_X),
               ureg_scalar(in_pos_vertex, TGSI_SWIZZLE_Z));

      ureg_EMIT(ureg, ureg_scalar(imm, TGSI_SWIZZLE_X));
   }

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, st->pipe);
}

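/* Emit a clamp so that values survive a change of signedness: SINT->UINT
 * clamps negative values to 0 (IMAX), UINT->SINT clamps values above
 * INT32_MAX to INT32_MAX (UMIN).
 */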
static void
build_conversion(struct ureg_program *ureg, const struct ureg_dst *temp,
                 enum st_pbo_conversion conversion)
{
   switch (conversion) {
   case ST_PBO_CONVERT_SINT_TO_UINT:
      ureg_IMAX(ureg, *temp, ureg_src(*temp), ureg_imm1i(ureg, 0));
      break;
   case ST_PBO_CONVERT_UINT_TO_SINT:
      ureg_UMIN(ureg, *temp, ureg_src(*temp), ureg_imm1u(ureg, (1u << 31) - 1));
      break;
   default:
      /* no-op */
      break;
   }
}

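/* Create the fragment shader used by st_pbo_draw().  The shader reconstructs
 * the buffer texel index from the fragment position and the addressing
 * constants.  For uploads it fetches that texel from the buffer texture and
 * writes it to the color output; for downloads it fetches from the source
 * texture (addressed by position and layer) and stores the result into a
 * writeonly buffer image at the computed index.
 */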
static void *
create_fs(struct st_context *st, bool download, enum pipe_texture_target target,
          enum st_pbo_conversion conversion)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct ureg_program *ureg;
   bool have_layer;
   struct ureg_dst out;
   struct ureg_src sampler;
   struct ureg_src pos;
   struct ureg_src layer;
   struct ureg_src const0;
   struct ureg_src const1;
   struct ureg_dst temp0;

   have_layer =
      st->pbo.layers &&
      (!download || target == PIPE_TEXTURE_1D_ARRAY
                 || target == PIPE_TEXTURE_2D_ARRAY
                 || target == PIPE_TEXTURE_3D
                 || target == PIPE_TEXTURE_CUBE
                 || target == PIPE_TEXTURE_CUBE_ARRAY);

   ureg = ureg_create(PIPE_SHADER_FRAGMENT);
   if (!ureg)
      return NULL;

   if (!download) {
      out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
   } else {
      struct ureg_src image;

      /* writeonly images do not require an explicitly given format. */
      image = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE,
                              true, false);
      out = ureg_dst(image);
   }

   sampler = ureg_DECL_sampler(ureg, 0);
   if (screen->get_param(screen, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL)) {
      pos = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_POSITION, 0);
   } else {
      pos = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_POSITION, 0,
                               TGSI_INTERPOLATE_LINEAR);
   }
   if (have_layer) {
      layer = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_LAYER, 0,
                                 TGSI_INTERPOLATE_CONSTANT);
   }
   const0 = ureg_DECL_constant(ureg, 0);
   const1 = ureg_DECL_constant(ureg, 1);
   temp0 = ureg_DECL_temporary(ureg);

   /* Note: const0 = [ -xoffset + skip_pixels, -yoffset, stride, image_size ] */
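   /* Note: const1.x = layer_offset; only used for PIPE_TEXTURE_3D downloads. */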

   /* temp0.xy = f2i(pos.xy) */
   ureg_F2I(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY),
            ureg_swizzle(pos,
                         TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                         TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y));

   /* temp0.xy = temp0.xy + const0.xy */
   ureg_UADD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY),
             ureg_swizzle(ureg_src(temp0),
                          TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                          TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y),
             ureg_swizzle(const0,
                          TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                          TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y));

   /* temp0.x = const0.z * temp0.y + temp0.x */
   ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X),
             ureg_scalar(const0, TGSI_SWIZZLE_Z),
             ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_Y),
             ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X));

   if (have_layer) {
      /* temp0.x = const0.w * layer + temp0.x */
      ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X),
                ureg_scalar(const0, TGSI_SWIZZLE_W),
                ureg_scalar(layer, TGSI_SWIZZLE_X),
                ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X));
   }

   /* temp0.w = 0 */
   ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_W), ureg_imm1u(ureg, 0));

   if (download) {
      struct ureg_dst temp1;
      struct ureg_src op[2];

      temp1 = ureg_DECL_temporary(ureg);

      /* temp1.xy = f2i(pos.xy) */
      ureg_F2I(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_XY), pos);

      /* temp1.zw = 0 */
      ureg_MOV(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_ZW), ureg_imm1u(ureg, 0));

      if (have_layer) {
         struct ureg_dst temp1_layer =
            ureg_writemask(temp1, target == PIPE_TEXTURE_1D_ARRAY ? TGSI_WRITEMASK_Y
                                                                  : TGSI_WRITEMASK_Z);

         /* temp1.y/z = layer */
         ureg_MOV(ureg, temp1_layer, ureg_scalar(layer, TGSI_SWIZZLE_X));

         if (target == PIPE_TEXTURE_3D) {
            /* temp1.z += layer_offset */
            ureg_UADD(ureg, temp1_layer,
                      ureg_scalar(ureg_src(temp1), TGSI_SWIZZLE_Z),
                      ureg_scalar(const1, TGSI_SWIZZLE_X));
         }
      }

      /* temp1 = txf(sampler, temp1) */
      ureg_TXF(ureg, temp1, util_pipe_tex_to_tgsi_tex(target, 1),
               ureg_src(temp1), sampler);

      build_conversion(ureg, &temp1, conversion);

      /* store(out, temp0, temp1) */
      op[0] = ureg_src(temp0);
      op[1] = ureg_src(temp1);
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &out, 1, op, 2, 0,
                       TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE);

      ureg_release_temporary(ureg, temp1);
   } else {
      /* out = txf(sampler, temp0.x) */
      ureg_TXF(ureg, temp0, TGSI_TEXTURE_BUFFER, ureg_src(temp0), sampler);

      build_conversion(ureg, &temp0, conversion);

      ureg_MOV(ureg, out, ureg_src(temp0));
   }

   ureg_release_temporary(ureg, temp0);

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, pipe);
}

static enum st_pbo_conversion
get_pbo_conversion(enum pipe_format src_format, enum pipe_format dst_format)
{
   if (util_format_is_pure_uint(src_format)) {
      if (util_format_is_pure_sint(dst_format))
         return ST_PBO_CONVERT_UINT_TO_SINT;
   } else if (util_format_is_pure_sint(src_format)) {
      if (util_format_is_pure_uint(dst_format))
         return ST_PBO_CONVERT_SINT_TO_UINT;
   }

   return ST_PBO_CONVERT_NONE;
}

void *
st_pbo_get_upload_fs(struct st_context *st,
                     enum pipe_format src_format,
                     enum pipe_format dst_format)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.upload_fs) == ST_NUM_PBO_CONVERSIONS);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.upload_fs[conversion])
      st->pbo.upload_fs[conversion] = create_fs(st, false, 0, conversion);

   return st->pbo.upload_fs[conversion];
}

void *
st_pbo_get_download_fs(struct st_context *st, enum pipe_texture_target target,
                       enum pipe_format src_format,
                       enum pipe_format dst_format)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.download_fs) == ST_NUM_PBO_CONVERSIONS);
   assert(target < PIPE_MAX_TEXTURE_TYPES);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.download_fs[conversion][target])
      st->pbo.download_fs[conversion][target] = create_fs(st, true, target, conversion);

   return st->pbo.download_fs[conversion][target];
}

void
st_init_pbo_helpers(struct st_context *st)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;

   st->pbo.upload_enabled =
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OBJECTS) &&
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT) >= 1 &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_INTEGERS);
   if (!st->pbo.upload_enabled)
      return;

   st->pbo.download_enabled =
      st->pbo.upload_enabled &&
      screen->get_param(screen, PIPE_CAP_SAMPLER_VIEW_TARGET) &&
      screen->get_param(screen, PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT) &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
                               PIPE_SHADER_CAP_MAX_SHADER_IMAGES) >= 1;

   st->pbo.rgba_only =
      screen->get_param(screen, PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY);

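   /* Layered rendering, needed for 3D / array texture transfers: prefer
    * writing gl_Layer directly from the vertex shader; otherwise fall back
    * to a geometry shader if it can emit at least one triangle.
    */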
   if (screen->get_param(screen, PIPE_CAP_TGSI_INSTANCEID)) {
      if (screen->get_param(screen, PIPE_CAP_TGSI_VS_LAYER_VIEWPORT)) {
         st->pbo.layers = true;
      } else if (screen->get_param(screen, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES) >= 3) {
         st->pbo.layers = true;
         st->pbo.use_gs = true;
      }
   }

   /* Blend state */
   memset(&st->pbo.upload_blend, 0, sizeof(struct pipe_blend_state));
   st->pbo.upload_blend.rt[0].colormask = PIPE_MASK_RGBA;

   /* Rasterizer state */
   memset(&st->pbo.raster, 0, sizeof(struct pipe_rasterizer_state));
   st->pbo.raster.half_pixel_center = 1;
}

void
st_destroy_pbo_helpers(struct st_context *st)
{
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(st->pbo.upload_fs); ++i) {
      if (st->pbo.upload_fs[i]) {
         cso_delete_fragment_shader(st->cso_context, st->pbo.upload_fs[i]);
         st->pbo.upload_fs[i] = NULL;
      }
   }

   for (i = 0; i < ARRAY_SIZE(st->pbo.download_fs); ++i) {
      for (unsigned j = 0; j < ARRAY_SIZE(st->pbo.download_fs[0]); ++j) {
         if (st->pbo.download_fs[i][j]) {
            cso_delete_fragment_shader(st->cso_context, st->pbo.download_fs[i][j]);
            st->pbo.download_fs[i][j] = NULL;
         }
      }
   }

   if (st->pbo.gs) {
      cso_delete_geometry_shader(st->cso_context, st->pbo.gs);
      st->pbo.gs = NULL;
   }

   if (st->pbo.vs) {
      cso_delete_vertex_shader(st->cso_context, st->pbo.vs);
      st->pbo.vs = NULL;
   }
}