/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "brw_mipmap_tree.h"
#include "brw_batch.h"
#include "brw_tex.h"
#include "brw_fbo.h"
#include "brw_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

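/* Memory Object Control State (MOCS) values for surface state, indexed by
 * devinfo->ver.  The WB entries request write-back caching; the PTE entries
 * defer to the caching bits in the kernel's page tables.
 */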
static const uint32_t wb_mocs[] = {
   [7] = GFX7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GFX7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

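/* External BOs may be shared with other clients or scanned out, so caching
 * for them is left to the page tables (PTE); everything else gets
 * write-back caching.
 */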
uint32_t
brw_get_bo_mocs(const struct intel_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->ver];
}

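/* Copy mt->surf into *surf.  If the dim layout implied by the GL target
 * doesn't match the miptree's actual layout, rewrite the surface to
 * describe just the requested level/slice; see the comment in the body.
 */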
static void
get_isl_surf(struct brw_context *brw, struct brw_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;
   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only work if you intend to access a
    * single level and slice of the texture, and the hardware supports the
    * tile offset feature in order to allow non-tile-aligned base offsets,
    * since we'll have to point the hardware to the first texel of the
    * level instead of relying on the usual base level/layer controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += brw_miptree_get_tile_offsets(mt, view->base_level,
                                           view->base_array_layer,
                                           tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

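/* Allocate a SURFACE_STATE in the batch's state space, fill it through
 * ISL, and emit relocations for the main surface, any auxiliary surface,
 * and the indirect clear color, storing the state offset in *surf_offset.
 */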
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct brw_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct brw_bo *clear_bo = NULL;
   uint64_t clear_offset = 0;

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &mt->aux_buf->surf;
      aux_bo = mt->aux_buf->bo;
      aux_offset = mt->aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = brw_miptree_get_clear_color(mt, &clear_bo, &clear_offset);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = clear_bo != NULL,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gfx7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      if (devinfo->ver >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (clear_bo != NULL) {
      /* Make sure the offset is aligned with a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint64_t *clear_address =
         state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}

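/* Emit a render target SURFACE_STATE for a gfx6+ renderbuffer via the
 * shared ISL path and return its offset in the batch's state space.
 */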
static uint32_t
gfx6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_renderbuffer *irb = brw_renderbuffer(rb);
   struct brw_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, brw_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

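/* Translate a GL texture target to the BRW_SURFACE_* type used by the
 * older hand-packed SURFACE_STATE layout.
 */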
GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

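/* The hand-packed SURFACE_STATE only distinguishes 1x from 4x here, which
 * matches the hardware this path serves (gfx6 supports 4x MSAA at most).
 */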
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->Attrib.BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->Attrib.DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
      if (img->TexFormat == MESA_FORMAT_R_SRGB8) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
      FALLTHROUGH;
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->Attrib._Swizzle, 0)],
                        swizzles[GET_SWZ(t->Attrib._Swizzle, 1)],
                        swizzles[GET_SWZ(t->Attrib._Swizzle, 2)],
                        swizzles[GET_SWZ(t->Attrib._Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gfx7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

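/* Emit the SURFACE_STATE for a single texture unit, applying the various
 * format overrides (depth/stencil sampling, ETC shadow miptrees, gather
 * workarounds) before handing off to brw_emit_surface_state().
 */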
static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct brw_texture_object *intel_obj = brw_texture_object(obj);
      struct brw_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->Attrib.NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->Attrib.BaseLevel];
      const bool alpha_depth = obj->Attrib.DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (brw_miptree_has_etc_shadow(brw, mt)) {
         mesa_fmt = mt->shadow_mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->Attrib.sRGBDecode);

      /* Implement gfx6 and gfx7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->ver == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->ver == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->ver <= 7) {
            assert(mt->shadow_mt && !mt->stencil_mt->shadow_needs_update);
            mt = mt->shadow_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->ver <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->shadow_mt && !mt->shadow_needs_update);
         mt = mt->shadow_mt;
         format = ISL_FORMAT_R8_UINT;
      } else if (brw_miptree_needs_fake_etc(brw, mt)) {
         assert(mt->shadow_mt && !mt->shadow_needs_update);
         mt = mt->shadow_mt;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->Attrib.MinLevel + obj->Attrib.BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->Attrib.BaseLevel + 1,
         .base_array_layer = obj->Attrib.MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      /* On Ivy Bridge and earlier, we handle texture swizzle with shader
       * code.  The actual surface swizzle should be identity.
       */
      if (devinfo->verx10 <= 70)
         view.swizzle = ISL_SWIZZLE_IDENTITY;

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         brw_miptree_texture_aux_usage(brw, mt, format,
                                       brw->gfx9_astc5x5_wa_tex_mask);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

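/* Fill a SURFACE_STATE for a buffer.  When bo is NULL, buffer_offset is
 * used directly as the surface address and no relocation is emitted.
 */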
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              enum isl_format format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size_B = buffer_size,
                         .format = format,
                         .swizzle = ISL_SWIZZLE_IDENTITY,
                         .stride_B = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

static unsigned
buffer_texture_range_size(struct brw_context *brw,
                          struct gl_texture_object *obj)
{
   assert(obj->Target == GL_TEXTURE_BUFFER);
   const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
   const unsigned buffer_size = (!obj->BufferObject ? 0 :
                                 obj->BufferObject->Size);
   const unsigned buffer_offset = MIN2(buffer_size, obj->BufferOffset);

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   return MIN3((unsigned)obj->BufferSize,
               buffer_size - buffer_offset,
               brw->ctx.Const.MaxTextureBufferSize * texel_size);
}

static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset);

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct brw_buffer_object *intel_obj =
      brw_buffer_object(tObj->BufferObject);
   const unsigned size = buffer_texture_range_size(brw, tObj);
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (tObj->BufferObject == NULL) {
      emit_null_surface_state(brw, NULL, surf_offset);
      return;
   }

   if (intel_obj)
      bo = brw_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                false);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct brw_buffer_object *intel_bo = brw_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = brw_bufferobj_buffer(brw, intel_bo,
                                            offset_bytes,
                                            buffer_obj->Size - offset_bytes,
                                            true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->ver != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          .size = isl_extent3d(width, height, 1));
      return;
   }

   /* On Gfx6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandybridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *    If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gfx4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct brw_renderbuffer *irb = brw_renderbuffer(rb);
   struct brw_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, brw_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      brw_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gfx4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         brw_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             brw_renderbuffer_get_tile_offsets(irb,
                                                               &tile_x,
                                                               &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch_B - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->ver < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled &&
          ctx->Color._AdvancedBlendMode == BLEND_NONE &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

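/* Emit SURFACE_STATEs for all enabled color draw buffers, substituting a
 * null surface wherever a draw buffer is missing.
 */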
static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (brw_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->ver >= 6 ?
               gfx6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gfx4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit.  When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (devinfo->ver >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gfx6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct brw_renderbuffer *irb = brw_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, brw_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               brw_miptree_texture_aux_usage(brw, irb->mt, format,
                                             brw->gfx9_astc5x5_wa_tex_mask);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

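/* Return true if sampling this texture reads depth data; a packed
 * depth/stencil texture only counts when stencil texturing isn't selected.
 */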
static bool
is_depth_texture(struct brw_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

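/* Walk the stage's used-texture bitset and emit one SURFACE_STATE per
 * sampled texture, starting at the stage's gather or plane binding table
 * offset.
 */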
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = BITSET_LAST_BIT(prog->info.textures_used);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (BITSET_TEST(prog->info.textures_used, s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = BITSET_TEST(prog->info.textures_used_by_txf, s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct brw_texture_object *iobj = brw_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->ver < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->ver < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   if (!binding->BufferObject) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      if (size == 0) {
         emit_null_surface_state(brw, NULL, out_offset);
         return;
      }

      struct brw_buffer_object *iobj =
         brw_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         brw_bufferobj_buffer(brw, iobj, binding->Offset, size,
                              (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   if (prog->info.num_ubos) {
      assert(prog_data->binding_table.ubo_start < BRW_MAX_SURFACES);
      uint32_t *ubo_surf_offsets =
         &stage_state->surf_offset[prog_data->binding_table.ubo_start];

      for (int i = 0; i < prog->info.num_ubos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
         upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                               ISL_FORMAT_R32G32B32A32_FLOAT, 0);
      }
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog_data->binding_table.ssbo_start < BRW_MAX_SURFACES);
      uint32_t *ssbo_surf_offsets =
         &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
      uint32_t *abo_surf_offsets = ssbo_surf_offsets + prog->info.num_ssbos;

      for (int i = 0; i < prog->info.num_abos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                               ISL_FORMAT_RAW, RELOC_WRITE);
      }

      for (int i = 0; i < prog->info.num_ssbos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

         upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                               ISL_FORMAT_RAW, RELOC_WRITE);
      }
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

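/* Pick the isl_format to use for a shader image.  Write-only access can
 * use the format as-is; for reads, lower it to a typed format the
 * hardware can handle, or to RAW (untyped messages) if none exists.
 */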
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct intel_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY || access == GL_NONE) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          struct brw_image_param *param)
{
   const unsigned size = buffer_texture_range_size(brw, u->TexObj);
   update_default_image_param(brw, u, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

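/* Emit the SURFACE_STATE and brw_image_param for one image unit: a buffer
 * surface for buffer textures, a RAW buffer surface when the format had
 * to be lowered to untyped access, and a typed surface otherwise.
 */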
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);
      const bool written = (access != GL_READ_ONLY && access != GL_NONE);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));
         const unsigned buffer_size = buffer_texture_range_size(brw, obj);
         struct brw_bo *const bo = !obj->BufferObject ? NULL :
            brw_bufferobj_buffer(brw, brw_buffer_object(obj->BufferObject),
                                 obj->BufferOffset, buffer_size, written);

         brw_emit_buffer_surface_state(
            brw, surf_offset, bo, obj->BufferOffset,
            format, buffer_size, texel_size,
            written ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, param);

      } else {
         struct brw_texture_object *intel_obj = brw_texture_object(obj);
         struct brw_mipmap_tree *mt = intel_obj->mt;

         unsigned base_layer, num_layers;
         if (u->Layered) {
            if (obj->Target == GL_TEXTURE_3D) {
               base_layer = 0;
               num_layers = minify(mt->surf.logical_level0_px.depth, u->Level);
            } else {
               assert(obj->Immutable || obj->Attrib.MinLayer == 0);
               base_layer = obj->Attrib.MinLayer;
               num_layers = obj->Immutable ?
                               obj->Attrib.NumLayers :
                               mt->surf.logical_level0_px.array_len;
            }
         } else {
            base_layer = obj->Attrib.MinLayer + u->_Layer;
            num_layers = 1;
         }

         struct isl_view view = {
            .format = format,
            .base_level = obj->Attrib.MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = base_layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               written ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!brw_miptree_has_color_unresolved(mt,
                                                     view.base_level, 1,
                                                     view.base_array_layer,
                                                     view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   written ? RELOC_WRITE : 0);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms; make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

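/* Expose gl_NumWorkGroups to the shader through a raw buffer surface.
 * When the counts already live in a BO (indirect dispatch), point the
 * surface at it; otherwise upload the CPU-side values first.
 */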
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *)brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);

      /* The state buffer now holds a reference to our upload, drop ours. */
      if (bo != brw->compute.num_work_groups_bo)
         brw_bo_unreference(bo);

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};