/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

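/* Memory Object Control State (MOCS) entries for each hardware generation,
 * indexed by devinfo->gen.  Driver-internal buffers use write-back (WB)
 * caching; buffers that may be visible to external consumers (e.g. scanout)
 * defer to the caching mode of the kernel's page table entries (PTE).
 */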
static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

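/* Pick the MOCS entry for a buffer: PTE-based caching for externally
 * visible BOs, plain write-back for everything else.
 */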
uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}

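/* Fill *surf with a copy of mt->surf, overriding the dimension layout when
 * the GL texture target is incompatible with how the miptree is laid out in
 * memory (see the comment in the body).  In that case *offset, *tile_x and
 * *tile_y are adjusted to point at the single level/layer being accessed.
 */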
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only possibly work if you only intend
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

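/* Allocate a SURFACE_STATE entry in the batch's state space and fill it in
 * for the given miptree, view and aux usage, emitting the relocations that
 * patch in the surface, aux and clear-color addresses at submission time.
 * The state offset is written to *surf_offset.
 */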
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct brw_bo *clear_bo = NULL;
   uint64_t clear_offset = 0;

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &mt->aux_buf->surf;
      aux_bo = mt->aux_buf->bo;
      aux_offset = mt->aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = intel_miptree_get_clear_color(mt, &clear_bo, &clear_offset);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = clear_bo != NULL,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (clear_bo != NULL) {
      /* Make sure the offset is aligned with a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint64_t *clear_address =
         state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}

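/* Emit a SURFACE_STATE for a renderbuffer being used as a color target on
 * gen6 and later, returning its offset into the batch's state space.
 */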
static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

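/* Translate a sample count into the legacy (pre-gen7) SURFACE_STATE
 * multisample count field.  Of the generations using this layout, only gen6
 * supports multisampling, and it only supports 4x, so any count above 1
 * maps to MULTISAMPLECOUNT_4.
 */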
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
      if (img->TexFormat == MESA_FORMAT_R_SRGB8) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
      /* fallthrough */
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *           0          1          2          3             4            5
 *           4          5          6          7             0            1
 *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or ANDing with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

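/* Emit a SURFACE_STATE for the texture bound to the given texture unit and
 * record its offset in *surf_offset.  Handles buffer textures, multi-plane
 * (YUV) miptrees, shadow miptrees for stencil and ETC emulation, and the
 * gather4 and sRGB-decode format overrides.
 */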
static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (intel_miptree_has_etc_shadow(brw, mt)) {
         mesa_fmt = mt->shadow_mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->shadow_mt && !mt->stencil_mt->shadow_needs_update);
            mt = mt->shadow_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->shadow_mt && !mt->shadow_needs_update);
         mt = mt->shadow_mt;
         format = ISL_FORMAT_R8_UINT;
      } else if (intel_miptree_needs_fake_etc(brw, mt)) {
         assert(mt->shadow_mt && !mt->shadow_needs_update);
         mt = mt->shadow_mt;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      /* On Ivy Bridge and earlier, we handle texture swizzle with shader
       * code.  The actual surface swizzle should be identity.
       */
      if (devinfo->gen <= 7 && !devinfo->is_haswell)
         view.swizzle = ISL_SWIZZLE_IDENTITY;

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format,
                                         brw->gen9_astc5x5_wa_tex_mask);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

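/* Fill a SURFACE_STATE entry describing a (possibly NULL) buffer object with
 * the given format and stride.  A NULL bo produces a surface at the raw
 * address given by buffer_offset, with no relocation.
 */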
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              enum isl_format format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size_B = buffer_size,
                         .format = format,
                         .swizzle = ISL_SWIZZLE_IDENTITY,
                         .stride_B = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

static unsigned
buffer_texture_range_size(struct brw_context *brw,
                          struct gl_texture_object *obj)
{
   assert(obj->Target == GL_TEXTURE_BUFFER);
   const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
   const unsigned buffer_size = (!obj->BufferObject ? 0 :
                                 obj->BufferObject->Size);
   const unsigned buffer_offset = MIN2(buffer_size, obj->BufferOffset);

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
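   /* For example, an RGBA32F buffer texture (16-byte texels) backed by a
    * 1000-byte buffer with a 16-byte BufferOffset exposes
    * MIN3(BufferSize, 1000 - 16 = 984, MaxTextureBufferSize * 16) bytes.
    */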
   return MIN3((unsigned)obj->BufferSize,
               buffer_size - buffer_offset,
               brw->ctx.Const.MaxTextureBufferSize * texel_size);
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   const unsigned size = buffer_texture_range_size(brw, tObj);
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj)
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
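   /* The entry count (minus one) is a 27-bit value spread across the legacy
    * SURFACE_STATE width (bits 6:0), height (bits 19:7) and depth
    * (bits 26:20) fields.
    */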
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
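   /* For example, a 1920x1080 framebuffer needs ALIGN(1920, 16) / 16 = 120 by
    * ALIGN(1080, 16) / 16 = 68 tiles, i.e. (120 + 68 - 1) * 4096 bytes
    * (exactly 748 KiB) of scratch space.
    */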
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch_B - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled &&
          ctx->Color._AdvancedBlendMode == BLEND_NONE &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit.  When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format,
                                               brw->gen9_astc5x5_wa_tex_mask);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

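/* Walk the samplers used by one shader stage and emit a SURFACE_STATE for
 * each bound texture, starting at the stage's gather or plane binding table
 * offset.  Unused sampler slots get a zero surface offset.
 */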
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->info.textures_used);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->info.textures_used & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* emit alternate set of surface state for gather.  this
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* emit alternate set of surface state for gather.  this
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   if (!binding->BufferObject) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      if (size == 0) {
         emit_null_surface_state(brw, NULL, out_offset);
         return;
      }

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   if (prog->info.num_ubos) {
      assert(prog_data->binding_table.ubo_start < BRW_MAX_SURFACES);
      uint32_t *ubo_surf_offsets =
         &stage_state->surf_offset[prog_data->binding_table.ubo_start];

      for (int i = 0; i < prog->info.num_ubos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
         upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                               ISL_FORMAT_R32G32B32A32_FLOAT, 0);
      }
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog_data->binding_table.ssbo_start < BRW_MAX_SURFACES);
      uint32_t *ssbo_surf_offsets =
         &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
      uint32_t *abo_surf_offsets = ssbo_surf_offsets + prog->info.num_ssbos;

      for (int i = 0; i < prog->info.num_abos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                               ISL_FORMAT_RAW, RELOC_WRITE);
      }

      for (int i = 0; i < prog->info.num_ssbos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

         upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                               ISL_FORMAT_RAW, RELOC_WRITE);
      }
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

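/* Choose the hardware surface format for a shader image unit.  Write-only
 * access can use the format as-is; read access is limited to the typed
 * formats the data port can read, and falls back to RAW (untyped) messages
 * when no compatible typed format exists.
 */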
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY || access == GL_NONE) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          struct brw_image_param *param)
{
   const unsigned size = buffer_texture_range_size(brw, u->TexObj);
   update_default_image_param(brw, u, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);
      const bool written = (access != GL_READ_ONLY && access != GL_NONE);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));
         const unsigned buffer_size = buffer_texture_range_size(brw, obj);
         struct brw_bo *const bo = !obj->BufferObject ? NULL :
            intel_bufferobj_buffer(brw, intel_buffer_object(obj->BufferObject),
                                   obj->BufferOffset, buffer_size, written);

         brw_emit_buffer_surface_state(
            brw, surf_offset, bo, obj->BufferOffset,
            format, buffer_size, texel_size,
            written ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         unsigned base_layer, num_layers;
         if (u->Layered) {
            if (obj->Target == GL_TEXTURE_3D) {
               base_layer = 0;
               num_layers = minify(mt->surf.logical_level0_px.depth, u->Level);
            } else {
               assert(obj->Immutable || obj->MinLayer == 0);
               base_layer = obj->MinLayer;
               num_layers = obj->Immutable ?
                               obj->NumLayers :
                               mt->surf.logical_level0_px.array_len;
            }
         } else {
            base_layer = obj->MinLayer + u->_Layer;
            num_layers = 1;
         }

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = base_layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               written ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   written ? RELOC_WRITE : 0);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms, make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

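/* Bind the gl_NumWorkGroups surface, uploading the three dispatch dimensions
 * on the fly when the dispatch was not sourced from a buffer object
 * (i.e. a direct rather than indirect compute dispatch).
 */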
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *)brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);

      /* The state buffer now holds a reference to our upload, drop ours. */
      if (bo != brw->compute.num_work_groups_bo)
         brw_bo_unreference(bo);

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};