1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
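/* MOCS (memory object control state) values for texture and render-target
 * surfaces, indexed directly by hardware generation (e.g. tex_mocs[brw->gen]
 * below); only gens 7 through 9 have non-zero entries.
 */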
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96        * actual layout of the miptree structure in memory -- you're entering
97        * dangerous territory. This can only work if you intend
98        * to access a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
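      /* For example (hypothetical numbers): a 256x256 level-0 miptree viewed
       * at base_level 2 is minified to 64x64 here.
       */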
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 drm_intel_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 aux_bo = mt->mcs_buf->bo;
147 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
148 } else {
149 aux_bo = mt->hiz_buf->aux_base.bo;
150 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
151 }
152
153 /* We only really need a clear color if we also have an auxiliary
154 * surface. Without one, it does nothing.
155 */
156 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
157 }
158
159 void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
160 brw->isl_dev.ss.size,
161 brw->isl_dev.ss.align,
162 surf_index, surf_offset);
163
164 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
165 .address = mt->bo->offset64 + offset,
166 .aux_surf = aux_surf, .aux_usage = aux_usage,
167 .aux_address = aux_offset,
168 .mocs = mocs, .clear_color = clear_color,
169 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
170
171 drm_intel_bo_emit_reloc(brw->batch.bo,
172 *surf_offset + brw->isl_dev.ss.addr_offset,
173 mt->bo, offset,
174 read_domains, write_domains);
175
176 if (aux_surf) {
177 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
178 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
179 * contain other control information. Since buffer addresses are always
180 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
181 * an ordinary reloc to do the necessary address translation.
182 */
183 assert((aux_offset & 0xfff) == 0);
184 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
185 drm_intel_bo_emit_reloc(brw->batch.bo,
186 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
187 aux_bo, *aux_addr - aux_bo->offset64,
188 read_domains, write_domains);
189 }
190 }
191
192 uint32_t
193 brw_update_renderbuffer_surface(struct brw_context *brw,
194 struct gl_renderbuffer *rb,
195 uint32_t flags, unsigned unit /* unused */,
196 uint32_t surf_index)
197 {
198 struct gl_context *ctx = &brw->ctx;
199 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
200 struct intel_mipmap_tree *mt = irb->mt;
201
202 if (brw->gen < 9) {
203 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
204 }
205
206 assert(brw_render_target_supported(brw, rb));
207
208 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
209 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
210 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
211 __func__, _mesa_get_format_name(rb_format));
212 }
213
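   /* For the UMS and CMS multisample layouts, irb->mt_layer is expressed in
    * sample units rather than whole logical layers (see the note in
    * update_renderbuffer_read_surfaces()), so divide by the sample count to
    * recover the logical layer index.
    */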
214 const unsigned layer_multiplier =
215 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
216 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
217 MAX2(irb->mt->num_samples, 1) : 1;
218
219 struct isl_view view = {
220 .format = brw->render_target_format[rb_format],
221 .base_level = irb->mt_level - irb->mt->first_level,
222 .levels = 1,
223 .base_array_layer = irb->mt_layer / layer_multiplier,
224 .array_len = MAX2(irb->layer_count, 1),
225 .swizzle = ISL_SWIZZLE_IDENTITY,
226 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
227 };
228
229 uint32_t offset;
230 brw_emit_surface_state(brw, mt, flags, mt->target, view,
231 rb_mocs[brw->gen],
232 &offset, surf_index,
233 I915_GEM_DOMAIN_RENDER,
234 I915_GEM_DOMAIN_RENDER);
235 return offset;
236 }
237
238 GLuint
239 translate_tex_target(GLenum target)
240 {
241 switch (target) {
242 case GL_TEXTURE_1D:
243 case GL_TEXTURE_1D_ARRAY_EXT:
244 return BRW_SURFACE_1D;
245
246 case GL_TEXTURE_RECTANGLE_NV:
247 return BRW_SURFACE_2D;
248
249 case GL_TEXTURE_2D:
250 case GL_TEXTURE_2D_ARRAY_EXT:
251 case GL_TEXTURE_EXTERNAL_OES:
252 case GL_TEXTURE_2D_MULTISAMPLE:
253 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
254 return BRW_SURFACE_2D;
255
256 case GL_TEXTURE_3D:
257 return BRW_SURFACE_3D;
258
259 case GL_TEXTURE_CUBE_MAP:
260 case GL_TEXTURE_CUBE_MAP_ARRAY:
261 return BRW_SURFACE_CUBE;
262
263 default:
264 unreachable("not reached");
265 }
266 }
267
268 uint32_t
269 brw_get_surface_tiling_bits(uint32_t tiling)
270 {
271 switch (tiling) {
272 case I915_TILING_X:
273 return BRW_SURFACE_TILED;
274 case I915_TILING_Y:
275 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
276 default:
277 return 0;
278 }
279 }
280
281
282 uint32_t
283 brw_get_surface_num_multisamples(unsigned num_samples)
284 {
285 if (num_samples > 1)
286 return BRW_SURFACE_MULTISAMPLECOUNT_4;
287 else
288 return BRW_SURFACE_MULTISAMPLECOUNT_1;
289 }
290
291 /**
292 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
293 * swizzling.
294 */
295 int
296 brw_get_texture_swizzle(const struct gl_context *ctx,
297 const struct gl_texture_object *t)
298 {
299 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
300
301 int swizzles[SWIZZLE_NIL + 1] = {
302 SWIZZLE_X,
303 SWIZZLE_Y,
304 SWIZZLE_Z,
305 SWIZZLE_W,
306 SWIZZLE_ZERO,
307 SWIZZLE_ONE,
308 SWIZZLE_NIL
309 };
310
311 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
312 img->_BaseFormat == GL_DEPTH_STENCIL) {
313 GLenum depth_mode = t->DepthMode;
314
315 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
316 * with depth component data specified with a sized internal format.
317 * Otherwise, it's left at the old default, GL_LUMINANCE.
318 */
319 if (_mesa_is_gles3(ctx) &&
320 img->InternalFormat != GL_DEPTH_COMPONENT &&
321 img->InternalFormat != GL_DEPTH_STENCIL) {
322 depth_mode = GL_RED;
323 }
324
325 switch (depth_mode) {
326 case GL_ALPHA:
327 swizzles[0] = SWIZZLE_ZERO;
328 swizzles[1] = SWIZZLE_ZERO;
329 swizzles[2] = SWIZZLE_ZERO;
330 swizzles[3] = SWIZZLE_X;
331 break;
332 case GL_LUMINANCE:
333 swizzles[0] = SWIZZLE_X;
334 swizzles[1] = SWIZZLE_X;
335 swizzles[2] = SWIZZLE_X;
336 swizzles[3] = SWIZZLE_ONE;
337 break;
338 case GL_INTENSITY:
339 swizzles[0] = SWIZZLE_X;
340 swizzles[1] = SWIZZLE_X;
341 swizzles[2] = SWIZZLE_X;
342 swizzles[3] = SWIZZLE_X;
343 break;
344 case GL_RED:
345 swizzles[0] = SWIZZLE_X;
346 swizzles[1] = SWIZZLE_ZERO;
347 swizzles[2] = SWIZZLE_ZERO;
348 swizzles[3] = SWIZZLE_ONE;
349 break;
350 }
351 }
352
353 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
354
355 /* If the texture's format is alpha-only, force R, G, and B to
356 * 0.0. Similarly, if the texture's format has no alpha channel,
357 * force the alpha value read to 1.0. This allows for the
358 * implementation to use an RGBA texture for any of these formats
359 * without leaking any unexpected values.
360 */
361 switch (img->_BaseFormat) {
362 case GL_ALPHA:
363 swizzles[0] = SWIZZLE_ZERO;
364 swizzles[1] = SWIZZLE_ZERO;
365 swizzles[2] = SWIZZLE_ZERO;
366 break;
367 case GL_LUMINANCE:
368 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
369 swizzles[0] = SWIZZLE_X;
370 swizzles[1] = SWIZZLE_X;
371 swizzles[2] = SWIZZLE_X;
372 swizzles[3] = SWIZZLE_ONE;
373 }
374 break;
375 case GL_LUMINANCE_ALPHA:
376 if (datatype == GL_SIGNED_NORMALIZED) {
377 swizzles[0] = SWIZZLE_X;
378 swizzles[1] = SWIZZLE_X;
379 swizzles[2] = SWIZZLE_X;
380 swizzles[3] = SWIZZLE_W;
381 }
382 break;
383 case GL_INTENSITY:
384 if (datatype == GL_SIGNED_NORMALIZED) {
385 swizzles[0] = SWIZZLE_X;
386 swizzles[1] = SWIZZLE_X;
387 swizzles[2] = SWIZZLE_X;
388 swizzles[3] = SWIZZLE_X;
389 }
390 break;
391 case GL_RED:
392 case GL_RG:
393 case GL_RGB:
394 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
395 swizzles[3] = SWIZZLE_ONE;
396 break;
397 }
398
399 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
400 swizzles[GET_SWZ(t->_Swizzle, 1)],
401 swizzles[GET_SWZ(t->_Swizzle, 2)],
402 swizzles[GET_SWZ(t->_Swizzle, 3)]);
403 }
404
405 /**
406  * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
407  * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
408 *
409 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
410 * 0 1 2 3 4 5
411 * 4 5 6 7 0 1
412 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
413 *
414 * which is simply adding 4 then modding by 8 (or anding with 7).
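 *
 * For example, SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = SCS_RED, and
 * SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = SCS_ZERO.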
415 *
416 * We then may need to apply workarounds for textureGather hardware bugs.
417 */
418 static unsigned
419 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
420 {
421 unsigned scs = (swizzle + 4) & 7;
422
423 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
424 }
425
426 static unsigned
427 brw_find_matching_rb(const struct gl_framebuffer *fb,
428 const struct intel_mipmap_tree *mt)
429 {
430 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
431 const struct intel_renderbuffer *irb =
432 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
433
434 if (irb && irb->mt == mt)
435 return i;
436 }
437
438 return fb->_NumColorDrawBuffers;
439 }
440
441 static inline bool
442 brw_texture_view_sane(const struct brw_context *brw,
443 const struct intel_mipmap_tree *mt,
444 const struct isl_view *view)
445 {
446 /* There are special cases only for lossless compression. */
447 if (!intel_miptree_is_lossless_compressed(brw, mt))
448 return true;
449
450 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
451 view->format))
452 return true;
453
454 /* Logic elsewhere needs to take care to resolve the color buffer prior
455 * to sampling it as non-compressed.
456 */
457 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
458 view->base_array_layer,
459 view->array_len))
460 return false;
461
462 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
463 const unsigned rb_index = brw_find_matching_rb(fb, mt);
464
465 if (rb_index == fb->_NumColorDrawBuffers)
466 return true;
467
468 /* Underlying surface is compressed but it is sampled using a format that
469 * the sampling engine doesn't support as compressed. Compression must be
470     * disabled for both the sampling engine and the data port in case the
471     * same surface is also used as a render target.
472 */
473 return brw->draw_aux_buffer_disabled[rb_index];
474 }
475
476 static bool
477 brw_disable_aux_surface(const struct brw_context *brw,
478 const struct intel_mipmap_tree *mt,
479 const struct isl_view *view)
480 {
481 /* Nothing to disable. */
482 if (!mt->mcs_buf)
483 return false;
484
485 const bool is_unresolved = intel_miptree_has_color_unresolved(
486 mt, view->base_level, view->levels,
487 view->base_array_layer, view->array_len);
488
489 /* There are special cases only for lossless compression. */
490 if (!intel_miptree_is_lossless_compressed(brw, mt))
491 return !is_unresolved;
492
493 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
494 const unsigned rb_index = brw_find_matching_rb(fb, mt);
495
496 /* If we are drawing into this with compression enabled, then we must also
497 * enable compression when texturing from it regardless of
498     * fast_clear_state. If we don't, then after the first draw call with
499 * this setup, there will be data in the CCS which won't get picked up by
500 * subsequent texturing operations as required by ARB_texture_barrier.
501 * Since we don't want to re-emit the binding table or do a resolve
502 * operation every draw call, the easiest thing to do is just enable
503 * compression on the texturing side. This is completely safe to do
504 * since, if compressed texturing weren't allowed, we would have disabled
505 * compression of render targets in whatever_that_function_is_called().
506 */
507 if (rb_index < fb->_NumColorDrawBuffers) {
508 if (brw->draw_aux_buffer_disabled[rb_index]) {
509 assert(!is_unresolved);
510 }
511
512 return brw->draw_aux_buffer_disabled[rb_index];
513 }
514
515 return !is_unresolved;
516 }
517
518 void
519 brw_update_texture_surface(struct gl_context *ctx,
520 unsigned unit,
521 uint32_t *surf_offset,
522 bool for_gather,
523 uint32_t plane)
524 {
525 struct brw_context *brw = brw_context(ctx);
526 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
527
528 if (obj->Target == GL_TEXTURE_BUFFER) {
529 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
530
531 } else {
532 struct intel_texture_object *intel_obj = intel_texture_object(obj);
533 struct intel_mipmap_tree *mt = intel_obj->mt;
534
535 if (plane > 0) {
536 if (mt->plane[plane - 1] == NULL)
537 return;
538 mt = mt->plane[plane - 1];
539 }
540
541 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
542 /* If this is a view with restricted NumLayers, then our effective depth
543 * is not just the miptree depth.
544 */
545 const unsigned view_num_layers =
546 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
547 mt->logical_depth0;
548
549 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
550 * texturing functions that return a float, as our code generation always
551 * selects the .x channel (which would always be 0).
552 */
553 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
554 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
555 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
556 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
557 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
558 brw_get_texture_swizzle(&brw->ctx, obj));
559
560 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
561 unsigned format = translate_tex_format(brw, mesa_fmt,
562 sampler->sRGBDecode);
563
564 /* Implement gen6 and gen7 gather work-around */
565 bool need_green_to_blue = false;
566 if (for_gather) {
567 if (brw->gen == 7 && (format == BRW_SURFACEFORMAT_R32G32_FLOAT ||
568 format == BRW_SURFACEFORMAT_R32G32_SINT ||
569 format == BRW_SURFACEFORMAT_R32G32_UINT)) {
570 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
571 need_green_to_blue = brw->is_haswell;
572 } else if (brw->gen == 6) {
573 /* Sandybridge's gather4 message is broken for integer formats.
574 * To work around this, we pretend the surface is UNORM for
575 * 8 or 16-bit formats, and emit shader instructions to recover
576 * the real INT/UINT value. For 32-bit formats, we pretend
577 * the surface is FLOAT, and simply reinterpret the resulting
578 * bits.
579 */
580 switch (format) {
581 case BRW_SURFACEFORMAT_R8_SINT:
582 case BRW_SURFACEFORMAT_R8_UINT:
583 format = BRW_SURFACEFORMAT_R8_UNORM;
584 break;
585
586 case BRW_SURFACEFORMAT_R16_SINT:
587 case BRW_SURFACEFORMAT_R16_UINT:
588 format = BRW_SURFACEFORMAT_R16_UNORM;
589 break;
590
591 case BRW_SURFACEFORMAT_R32_SINT:
592 case BRW_SURFACEFORMAT_R32_UINT:
593 format = BRW_SURFACEFORMAT_R32_FLOAT;
594 break;
595
596 default:
597 break;
598 }
599 }
600 }
601
602 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
603 if (brw->gen <= 7) {
604 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
605 mt = mt->r8stencil_mt;
606 } else {
607 mt = mt->stencil_mt;
608 }
609 format = BRW_SURFACEFORMAT_R8_UINT;
610 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
611 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
612 mt = mt->r8stencil_mt;
613 format = BRW_SURFACEFORMAT_R8_UINT;
614 }
615
616 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
617
618 struct isl_view view = {
619 .format = format,
620 .base_level = obj->MinLevel + obj->BaseLevel,
621 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
622 .base_array_layer = obj->MinLayer,
623 .array_len = view_num_layers,
624 .swizzle = {
625 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
626 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
627 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
628 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
629 },
630 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
631 };
632
633 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
634 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
635 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
636
637 assert(brw_texture_view_sane(brw, mt, &view));
638
639 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
640 INTEL_AUX_BUFFER_DISABLED : 0;
641 brw_emit_surface_state(brw, mt, flags, mt->target, view,
642 tex_mocs[brw->gen],
643 surf_offset, surf_index,
644 I915_GEM_DOMAIN_SAMPLER, 0);
645 }
646 }
647
648 void
649 brw_emit_buffer_surface_state(struct brw_context *brw,
650 uint32_t *out_offset,
651 drm_intel_bo *bo,
652 unsigned buffer_offset,
653 unsigned surface_format,
654 unsigned buffer_size,
655 unsigned pitch,
656 bool rw)
657 {
658 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
659 brw->isl_dev.ss.size,
660 brw->isl_dev.ss.align,
661 out_offset);
662
663 isl_buffer_fill_state(&brw->isl_dev, dw,
664 .address = (bo ? bo->offset64 : 0) + buffer_offset,
665 .size = buffer_size,
666 .format = surface_format,
667 .stride = pitch,
668 .mocs = tex_mocs[brw->gen]);
669
670 if (bo) {
671 drm_intel_bo_emit_reloc(brw->batch.bo,
672 *out_offset + brw->isl_dev.ss.addr_offset,
673 bo, buffer_offset,
674 I915_GEM_DOMAIN_SAMPLER,
675 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
676 }
677 }
678
679 void
680 brw_update_buffer_texture_surface(struct gl_context *ctx,
681 unsigned unit,
682 uint32_t *surf_offset)
683 {
684 struct brw_context *brw = brw_context(ctx);
685 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
686 struct intel_buffer_object *intel_obj =
687 intel_buffer_object(tObj->BufferObject);
688 uint32_t size = tObj->BufferSize;
689 drm_intel_bo *bo = NULL;
690 mesa_format format = tObj->_BufferObjectFormat;
691 uint32_t brw_format = brw_format_for_mesa_format(format);
692 int texel_size = _mesa_get_format_bytes(format);
693
694 if (intel_obj) {
695 size = MIN2(size, intel_obj->Base.Size);
696 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
697 }
698
699 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
700 _mesa_problem(NULL, "bad format %s for texture buffer\n",
701 _mesa_get_format_name(format));
702 }
703
704 brw_emit_buffer_surface_state(brw, surf_offset, bo,
705 tObj->BufferOffset,
706 brw_format,
707 size,
708 texel_size,
709 false /* rw */);
710 }
711
712 /**
713 * Create the constant buffer surface. Vertex/fragment shader constants will be
714 * read from this buffer with Data Port Read instructions/messages.
715 */
716 void
717 brw_create_constant_surface(struct brw_context *brw,
718 drm_intel_bo *bo,
719 uint32_t offset,
720 uint32_t size,
721 uint32_t *out_offset)
722 {
723 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
724 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
725 size, 1, false);
726 }
727
728 /**
729 * Create the buffer surface. Shader buffer variables will be
730  * read from / written to this buffer with Data Port Read/Write
731 * instructions/messages.
732 */
733 void
734 brw_create_buffer_surface(struct brw_context *brw,
735 drm_intel_bo *bo,
736 uint32_t offset,
737 uint32_t size,
738 uint32_t *out_offset)
739 {
740 /* Use a raw surface so we can reuse existing untyped read/write/atomic
741 * messages. We need these specifically for the fragment shader since they
742 * include a pixel mask header that we need to ensure correct behavior
743 * with helper invocations, which cannot write to the buffer.
744 */
745 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
746 BRW_SURFACEFORMAT_RAW,
747 size, 1, true);
748 }
749
750 /**
751 * Set up a binding table entry for use by stream output logic (transform
752 * feedback).
753 *
754 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
755 */
756 void
757 brw_update_sol_surface(struct brw_context *brw,
758 struct gl_buffer_object *buffer_obj,
759 uint32_t *out_offset, unsigned num_vector_components,
760 unsigned stride_dwords, unsigned offset_dwords)
761 {
762 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
763 uint32_t offset_bytes = 4 * offset_dwords;
764 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
765 offset_bytes,
766 buffer_obj->Size - offset_bytes);
767 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
768 out_offset);
769 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
770 size_t size_dwords = buffer_obj->Size / 4;
771 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
772
773 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
774 * too big to map using a single binding table entry?
775 */
776 assert((size_dwords - offset_dwords) / stride_dwords
777 <= BRW_MAX_NUM_BUFFER_ENTRIES);
778
779 if (size_dwords > offset_dwords + num_vector_components) {
780 /* There is room for at least 1 transform feedback output in the buffer.
781 * Compute the number of additional transform feedback outputs the
782 * buffer has room for.
783 */
784 buffer_size_minus_1 =
785 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
786 } else {
787 /* There isn't even room for a single transform feedback output in the
788 * buffer. We can't configure the binding table entry to prevent output
789 * entirely; we'll have to rely on the geometry shader to detect
790 * overflow. But to minimize the damage in case of a bug, set up the
791 * binding table entry to just allow a single output.
792 */
793 buffer_size_minus_1 = 0;
794 }
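   /* buffer_size_minus_1 is split across three SURFACE_STATE fields: the low
    * 7 bits go in Width, the next 13 bits in Height and the next 7 bits in
    * Depth, for 27 bits in total.
    */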
795 width = buffer_size_minus_1 & 0x7f;
796 height = (buffer_size_minus_1 & 0xfff80) >> 7;
797 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
798
799 switch (num_vector_components) {
800 case 1:
801 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
802 break;
803 case 2:
804 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
805 break;
806 case 3:
807 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
808 break;
809 case 4:
810 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
811 break;
812 default:
813 unreachable("Invalid vector size for transform feedback output");
814 }
815
816 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
817 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
818 surface_format << BRW_SURFACE_FORMAT_SHIFT |
819 BRW_SURFACE_RC_READ_WRITE;
820 surf[1] = bo->offset64 + offset_bytes; /* reloc */
821 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
822 height << BRW_SURFACE_HEIGHT_SHIFT);
823 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
824 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
825 surf[4] = 0;
826 surf[5] = 0;
827
828 /* Emit relocation to surface contents. */
829 drm_intel_bo_emit_reloc(brw->batch.bo,
830 *out_offset + 4,
831 bo, offset_bytes,
832 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
833 }
834
835 /* Creates a new WM constant buffer reflecting the current fragment program's
836 * constants, if needed by the fragment program.
837 *
838 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
839 * state atom.
840 */
841 static void
842 brw_upload_wm_pull_constants(struct brw_context *brw)
843 {
844 struct brw_stage_state *stage_state = &brw->wm.base;
845 /* BRW_NEW_FRAGMENT_PROGRAM */
846 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
847 /* BRW_NEW_FS_PROG_DATA */
848 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
849
850 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
851 /* _NEW_PROGRAM_CONSTANTS */
852 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
853 stage_state, prog_data);
854 }
855
856 const struct brw_tracked_state brw_wm_pull_constants = {
857 .dirty = {
858 .mesa = _NEW_PROGRAM_CONSTANTS,
859 .brw = BRW_NEW_BATCH |
860 BRW_NEW_BLORP |
861 BRW_NEW_FRAGMENT_PROGRAM |
862 BRW_NEW_FS_PROG_DATA,
863 },
864 .emit = brw_upload_wm_pull_constants,
865 };
866
867 /**
868 * Creates a null renderbuffer surface.
869 *
870 * This is used when the shader doesn't write to any color output. An FB
871 * write to target 0 will still be emitted, because that's how the thread is
872 * terminated (and computed depth is returned), so we need to have the
873  * hardware discard the target 0 color output.
874 */
875 static void
876 brw_emit_null_surface_state(struct brw_context *brw,
877 unsigned width,
878 unsigned height,
879 unsigned samples,
880 uint32_t *out_offset)
881 {
882 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
883 * Notes):
884 *
885 * A null surface will be used in instances where an actual surface is
886 * not bound. When a write message is generated to a null surface, no
887 * actual surface is written to. When a read message (including any
888 * sampling engine message) is generated to a null surface, the result
889 * is all zeros. Note that a null surface type is allowed to be used
890  *      with all messages, even if it is not specifically indicated as
891 * supported. All of the remaining fields in surface state are ignored
892 * for null surfaces, with the following exceptions:
893 *
894 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
895 * depth buffer’s corresponding state for all render target surfaces,
896 * including null.
897 *
898 * - Surface Format must be R8G8B8A8_UNORM.
899 */
900 unsigned surface_type = BRW_SURFACE_NULL;
901 drm_intel_bo *bo = NULL;
902 unsigned pitch_minus_1 = 0;
903 uint32_t multisampling_state = 0;
904 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
905 out_offset);
906
907 if (samples > 1) {
908 /* On Gen6, null render targets seem to cause GPU hangs when
909       * multisampling. So work around this problem by rendering into a dummy
910 * color buffer.
911 *
912 * To decrease the amount of memory needed by the workaround buffer, we
913 * set its pitch to 128 bytes (the width of a Y tile). This means that
914 * the amount of memory needed for the workaround buffer is
915 * (width_in_tiles + height_in_tiles - 1) tiles.
916 *
917 * Note that since the workaround buffer will be interpreted by the
918 * hardware as an interleaved multisampled buffer, we need to compute
919 * width_in_tiles and height_in_tiles by dividing the width and height
920 * by 16 rather than the normal Y-tile size of 32.
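       *
       * For example (hypothetical numbers): a 2048x1536 target gives
       * width_in_tiles = 128 and height_in_tiles = 96, so the workaround
       * buffer needs only (128 + 96 - 1) * 4096 bytes (~892 KiB).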
921 */
922 unsigned width_in_tiles = ALIGN(width, 16) / 16;
923 unsigned height_in_tiles = ALIGN(height, 16) / 16;
924 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
925 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
926 size_needed);
927 bo = brw->wm.multisampled_null_render_target_bo;
928 surface_type = BRW_SURFACE_2D;
929 pitch_minus_1 = 127;
930 multisampling_state = brw_get_surface_num_multisamples(samples);
931 }
932
933 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
934 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
935 if (brw->gen < 6) {
936 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
937 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
938 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
939 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
940 }
941 surf[1] = bo ? bo->offset64 : 0;
942 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
943 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
944
945 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
946 * Notes):
947 *
948 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
949 */
950 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
951 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
952 surf[4] = multisampling_state;
953 surf[5] = 0;
954
955 if (bo) {
956 drm_intel_bo_emit_reloc(brw->batch.bo,
957 *out_offset + 4,
958 bo, 0,
959 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
960 }
961 }
962
963 /**
964 * Sets up a surface state structure to point at the given region.
965 * While it is only used for the front/back buffer currently, it should be
966 * usable for further buffers when doing ARB_draw_buffer support.
967 */
968 static uint32_t
969 gen4_update_renderbuffer_surface(struct brw_context *brw,
970 struct gl_renderbuffer *rb,
971 uint32_t flags, unsigned unit,
972 uint32_t surf_index)
973 {
974 struct gl_context *ctx = &brw->ctx;
975 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
976 struct intel_mipmap_tree *mt = irb->mt;
977 uint32_t *surf;
978 uint32_t tile_x, tile_y;
979 uint32_t format = 0;
980 uint32_t offset;
981 /* _NEW_BUFFERS */
982 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
983 /* BRW_NEW_FS_PROG_DATA */
984
985 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
986 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
987
988 if (rb->TexImage && !brw->has_surface_tile_offset) {
989 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
990
991 if (tile_x != 0 || tile_y != 0) {
992 /* Original gen4 hardware couldn't draw to a non-tile-aligned
993        * destination in a miptree unless you actually set up your renderbuffer
994 * as a miptree and used the fragile lod/array_index/etc. controls to
995 * select the image. So, instead, we just make a new single-level
996 * miptree and render into that.
997 */
998 intel_renderbuffer_move_to_temp(brw, irb, false);
999 mt = irb->mt;
1000 }
1001 }
1002
1003 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
1004
1005 format = brw->render_target_format[rb_format];
1006 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1007 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1008 __func__, _mesa_get_format_name(rb_format));
1009 }
1010
1011 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1012 format << BRW_SURFACE_FORMAT_SHIFT);
1013
1014 /* reloc */
1015 assert(mt->offset % mt->cpp == 0);
1016 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1017 mt->bo->offset64 + mt->offset);
1018
1019 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1020 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1021
1022 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1023 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1024
1025 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1026
1027 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1028 /* Note that the low bits of these fields are missing, so
1029 * there's the possibility of getting in trouble.
1030 */
1031 assert(tile_x % 4 == 0);
1032 assert(tile_y % 2 == 0);
1033 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1034 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1035 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1036
1037 if (brw->gen < 6) {
1038 /* _NEW_COLOR */
1039 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1040 (ctx->Color.BlendEnabled & (1 << unit)))
1041 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1042
1043 if (!ctx->Color.ColorMask[unit][0])
1044 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1045 if (!ctx->Color.ColorMask[unit][1])
1046 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1047 if (!ctx->Color.ColorMask[unit][2])
1048 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1049
1050 /* As mentioned above, disable writes to the alpha component when the
1051 * renderbuffer is XRGB.
1052 */
1053 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1054 !ctx->Color.ColorMask[unit][3]) {
1055 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1056 }
1057 }
1058
1059 drm_intel_bo_emit_reloc(brw->batch.bo,
1060 offset + 4,
1061 mt->bo,
1062 surf[1] - mt->bo->offset64,
1063 I915_GEM_DOMAIN_RENDER,
1064 I915_GEM_DOMAIN_RENDER);
1065
1066 return offset;
1067 }
1068
1069 /**
1070 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1071 */
1072 void
1073 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1074 const struct gl_framebuffer *fb,
1075 uint32_t render_target_start,
1076 uint32_t *surf_offset)
1077 {
1078 GLuint i;
1079 const unsigned int w = _mesa_geometric_width(fb);
1080 const unsigned int h = _mesa_geometric_height(fb);
1081 const unsigned int s = _mesa_geometric_samples(fb);
1082
1083 /* Update surfaces for drawing buffers */
1084 if (fb->_NumColorDrawBuffers >= 1) {
1085 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1086 const uint32_t surf_index = render_target_start + i;
1087 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1088 INTEL_RENDERBUFFER_LAYERED : 0) |
1089 (brw->draw_aux_buffer_disabled[i] ?
1090 INTEL_AUX_BUFFER_DISABLED : 0);
1091
1092 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1093 surf_offset[surf_index] =
1094 brw->vtbl.update_renderbuffer_surface(
1095 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1096 } else {
1097 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1098 &surf_offset[surf_index]);
1099 }
1100 }
1101 } else {
1102 const uint32_t surf_index = render_target_start;
1103 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1104 &surf_offset[surf_index]);
1105 }
1106 }
1107
1108 static void
1109 update_renderbuffer_surfaces(struct brw_context *brw)
1110 {
1111 const struct gl_context *ctx = &brw->ctx;
1112
1113 /* BRW_NEW_FS_PROG_DATA */
1114 const struct brw_wm_prog_data *wm_prog_data =
1115 brw_wm_prog_data(brw->wm.base.prog_data);
1116
1117 /* _NEW_BUFFERS | _NEW_COLOR */
1118 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1119 brw_update_renderbuffer_surfaces(
1120 brw, fb,
1121 wm_prog_data->binding_table.render_target_start,
1122 brw->wm.base.surf_offset);
1123 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1124 }
1125
1126 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1127 .dirty = {
1128 .mesa = _NEW_BUFFERS |
1129 _NEW_COLOR,
1130 .brw = BRW_NEW_BATCH |
1131 BRW_NEW_BLORP |
1132 BRW_NEW_FS_PROG_DATA,
1133 },
1134 .emit = update_renderbuffer_surfaces,
1135 };
1136
1137 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1138 .dirty = {
1139 .mesa = _NEW_BUFFERS,
1140 .brw = BRW_NEW_BATCH |
1141 BRW_NEW_BLORP,
1142 },
1143 .emit = update_renderbuffer_surfaces,
1144 };
1145
1146 static void
1147 update_renderbuffer_read_surfaces(struct brw_context *brw)
1148 {
1149 const struct gl_context *ctx = &brw->ctx;
1150
1151 /* BRW_NEW_FS_PROG_DATA */
1152 const struct brw_wm_prog_data *wm_prog_data =
1153 brw_wm_prog_data(brw->wm.base.prog_data);
1154
1155 /* BRW_NEW_FRAGMENT_PROGRAM */
1156 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1157 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1158 /* _NEW_BUFFERS */
1159 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1160
1161 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1162 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1163 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1164 const unsigned surf_index =
1165 wm_prog_data->binding_table.render_target_read_start + i;
1166 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1167
1168 if (irb) {
1169 const unsigned format = brw->render_target_format[
1170 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1171 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1172 format));
1173
1174 /* Override the target of the texture if the render buffer is a
1175 * single slice of a 3D texture (since the minimum array element
1176 * field of the surface state structure is ignored by the sampler
1177 * unit for 3D textures on some hardware), or if the render buffer
1178 * is a 1D array (since shaders always provide the array index
1179 * coordinate at the Z component to avoid state-dependent
1180 * recompiles when changing the texture target of the
1181 * framebuffer).
1182 */
1183 const GLenum target =
1184 (irb->mt->target == GL_TEXTURE_3D &&
1185 irb->layer_count == 1) ? GL_TEXTURE_2D :
1186 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1187 irb->mt->target;
1188
1189 /* intel_renderbuffer::mt_layer is expressed in sample units for
1190 * the UMS and CMS multisample layouts, but
1191 * intel_renderbuffer::layer_count is expressed in units of whole
1192 * logical layers regardless of the multisample layout.
1193 */
1194 const unsigned mt_layer_unit =
1195 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1196 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1197 MAX2(irb->mt->num_samples, 1) : 1;
1198
1199 const struct isl_view view = {
1200 .format = format,
1201 .base_level = irb->mt_level - irb->mt->first_level,
1202 .levels = 1,
1203 .base_array_layer = irb->mt_layer / mt_layer_unit,
1204 .array_len = irb->layer_count,
1205 .swizzle = ISL_SWIZZLE_IDENTITY,
1206 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1207 };
1208
1209 const int flags = brw->draw_aux_buffer_disabled[i] ?
1210 INTEL_AUX_BUFFER_DISABLED : 0;
1211 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1212 tex_mocs[brw->gen],
1213 surf_offset, surf_index,
1214 I915_GEM_DOMAIN_SAMPLER, 0);
1215
1216 } else {
1217 brw->vtbl.emit_null_surface_state(
1218 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1219 _mesa_geometric_samples(fb), surf_offset);
1220 }
1221 }
1222
1223 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1224 }
1225 }
1226
1227 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1228 .dirty = {
1229 .mesa = _NEW_BUFFERS,
1230 .brw = BRW_NEW_BATCH |
1231 BRW_NEW_FRAGMENT_PROGRAM |
1232 BRW_NEW_FS_PROG_DATA,
1233 },
1234 .emit = update_renderbuffer_read_surfaces,
1235 };
1236
1237 static void
1238 update_stage_texture_surfaces(struct brw_context *brw,
1239 const struct gl_program *prog,
1240 struct brw_stage_state *stage_state,
1241 bool for_gather, uint32_t plane)
1242 {
1243 if (!prog)
1244 return;
1245
1246 struct gl_context *ctx = &brw->ctx;
1247
1248 uint32_t *surf_offset = stage_state->surf_offset;
1249
1250 /* BRW_NEW_*_PROG_DATA */
1251 if (for_gather)
1252 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1253 else
1254 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1255
1256 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1257 for (unsigned s = 0; s < num_samplers; s++) {
1258 surf_offset[s] = 0;
1259
1260 if (prog->SamplersUsed & (1 << s)) {
1261 const unsigned unit = prog->SamplerUnits[s];
1262
1263 /* _NEW_TEXTURE */
1264 if (ctx->Texture.Unit[unit]._Current) {
1265 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1266 }
1267 }
1268 }
1269 }
1270
1271
1272 /**
1273 * Construct SURFACE_STATE objects for enabled textures.
1274 */
1275 static void
1276 brw_update_texture_surfaces(struct brw_context *brw)
1277 {
1278 /* BRW_NEW_VERTEX_PROGRAM */
1279 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1280
1281 /* BRW_NEW_TESS_PROGRAMS */
1282 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1283 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1284
1285 /* BRW_NEW_GEOMETRY_PROGRAM */
1286 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1287
1288 /* BRW_NEW_FRAGMENT_PROGRAM */
1289 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1290
1291 /* _NEW_TEXTURE */
1292 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1293 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1294 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1295 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1296 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1297
1298    /* Emit an alternate set of surface state for gather. This
1299     * allows the surface format to be overridden for only the
1300     * gather4 messages. */
1301 if (brw->gen < 8) {
1302 if (vs && vs->nir->info->uses_texture_gather)
1303 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1304 if (tcs && tcs->nir->info->uses_texture_gather)
1305 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1306 if (tes && tes->nir->info->uses_texture_gather)
1307 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1308 if (gs && gs->nir->info->uses_texture_gather)
1309 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1310 if (fs && fs->nir->info->uses_texture_gather)
1311 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1312 }
1313
1314 if (fs) {
1315 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1316 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1317 }
1318
1319 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1320 }
1321
1322 const struct brw_tracked_state brw_texture_surfaces = {
1323 .dirty = {
1324 .mesa = _NEW_TEXTURE,
1325 .brw = BRW_NEW_BATCH |
1326 BRW_NEW_BLORP |
1327 BRW_NEW_FRAGMENT_PROGRAM |
1328 BRW_NEW_FS_PROG_DATA |
1329 BRW_NEW_GEOMETRY_PROGRAM |
1330 BRW_NEW_GS_PROG_DATA |
1331 BRW_NEW_TESS_PROGRAMS |
1332 BRW_NEW_TCS_PROG_DATA |
1333 BRW_NEW_TES_PROG_DATA |
1334 BRW_NEW_TEXTURE_BUFFER |
1335 BRW_NEW_VERTEX_PROGRAM |
1336 BRW_NEW_VS_PROG_DATA,
1337 },
1338 .emit = brw_update_texture_surfaces,
1339 };
1340
1341 static void
1342 brw_update_cs_texture_surfaces(struct brw_context *brw)
1343 {
1344 /* BRW_NEW_COMPUTE_PROGRAM */
1345 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1346
1347 /* _NEW_TEXTURE */
1348 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1349
1350    /* Emit an alternate set of surface state for gather. This
1351     * allows the surface format to be overridden for only the
1352     * gather4 messages.
1353 */
1354 if (brw->gen < 8) {
1355 if (cs && cs->nir->info->uses_texture_gather)
1356 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1357 }
1358
1359 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1360 }
1361
1362 const struct brw_tracked_state brw_cs_texture_surfaces = {
1363 .dirty = {
1364 .mesa = _NEW_TEXTURE,
1365 .brw = BRW_NEW_BATCH |
1366 BRW_NEW_BLORP |
1367 BRW_NEW_COMPUTE_PROGRAM,
1368 },
1369 .emit = brw_update_cs_texture_surfaces,
1370 };
1371
1372
1373 void
1374 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1375 struct brw_stage_state *stage_state,
1376 struct brw_stage_prog_data *prog_data)
1377 {
1378 struct gl_context *ctx = &brw->ctx;
1379
1380 if (!prog)
1381 return;
1382
1383 uint32_t *ubo_surf_offsets =
1384 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1385
1386 for (int i = 0; i < prog->info.num_ubos; i++) {
1387 struct gl_uniform_buffer_binding *binding =
1388 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1389
1390 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1391 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1392 } else {
1393 struct intel_buffer_object *intel_bo =
1394 intel_buffer_object(binding->BufferObject);
1395 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1396 if (!binding->AutomaticSize)
1397 size = MIN2(size, binding->Size);
1398 drm_intel_bo *bo =
1399 intel_bufferobj_buffer(brw, intel_bo,
1400 binding->Offset,
1401 size);
1402 brw_create_constant_surface(brw, bo, binding->Offset,
1403 size,
1404 &ubo_surf_offsets[i]);
1405 }
1406 }
1407
1408 uint32_t *ssbo_surf_offsets =
1409 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1410
1411 for (int i = 0; i < prog->info.num_ssbos; i++) {
1412 struct gl_shader_storage_buffer_binding *binding =
1413 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1414
1415 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1416 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1417 } else {
1418 struct intel_buffer_object *intel_bo =
1419 intel_buffer_object(binding->BufferObject);
1420 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1421 if (!binding->AutomaticSize)
1422 size = MIN2(size, binding->Size);
1423 drm_intel_bo *bo =
1424 intel_bufferobj_buffer(brw, intel_bo,
1425 binding->Offset,
1426 size);
1427 brw_create_buffer_surface(brw, bo, binding->Offset,
1428 size,
1429 &ssbo_surf_offsets[i]);
1430 }
1431 }
1432
1433 if (prog->info.num_ubos || prog->info.num_ssbos)
1434 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1435 }
1436
1437 static void
1438 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1439 {
1440 struct gl_context *ctx = &brw->ctx;
1441 /* _NEW_PROGRAM */
1442 struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1443
1444 /* BRW_NEW_FS_PROG_DATA */
1445 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1446 }
1447
1448 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1449 .dirty = {
1450 .mesa = _NEW_PROGRAM,
1451 .brw = BRW_NEW_BATCH |
1452 BRW_NEW_BLORP |
1453 BRW_NEW_FS_PROG_DATA |
1454 BRW_NEW_UNIFORM_BUFFER,
1455 },
1456 .emit = brw_upload_wm_ubo_surfaces,
1457 };
1458
1459 static void
1460 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1461 {
1462 struct gl_context *ctx = &brw->ctx;
1463 /* _NEW_PROGRAM */
1464 struct gl_shader_program *prog =
1465 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1466
1467 if (!prog || !prog->_LinkedShaders[MESA_SHADER_COMPUTE])
1468 return;
1469
1470 /* BRW_NEW_CS_PROG_DATA */
1471 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE]->Program,
1472 &brw->cs.base, brw->cs.base.prog_data);
1473 }
1474
1475 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1476 .dirty = {
1477 .mesa = _NEW_PROGRAM,
1478 .brw = BRW_NEW_BATCH |
1479 BRW_NEW_BLORP |
1480 BRW_NEW_CS_PROG_DATA |
1481 BRW_NEW_UNIFORM_BUFFER,
1482 },
1483 .emit = brw_upload_cs_ubo_surfaces,
1484 };
1485
1486 void
1487 brw_upload_abo_surfaces(struct brw_context *brw,
1488 const struct gl_program *prog,
1489 struct brw_stage_state *stage_state,
1490 struct brw_stage_prog_data *prog_data)
1491 {
1492 struct gl_context *ctx = &brw->ctx;
1493 uint32_t *surf_offsets =
1494 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1495
1496 if (prog->info.num_abos) {
1497 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1498 struct gl_atomic_buffer_binding *binding =
1499 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1500 struct intel_buffer_object *intel_bo =
1501 intel_buffer_object(binding->BufferObject);
1502 drm_intel_bo *bo = intel_bufferobj_buffer(
1503 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1504
1505 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1506 binding->Offset, BRW_SURFACEFORMAT_RAW,
1507 bo->size - binding->Offset, 1, true);
1508 }
1509
1510 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1511 }
1512 }
1513
1514 static void
1515 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1516 {
1517 /* _NEW_PROGRAM */
1518 const struct gl_program *wm = brw->fragment_program;
1519
1520 if (wm) {
1521 /* BRW_NEW_FS_PROG_DATA */
1522 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1523 }
1524 }
1525
1526 const struct brw_tracked_state brw_wm_abo_surfaces = {
1527 .dirty = {
1528 .mesa = _NEW_PROGRAM,
1529 .brw = BRW_NEW_ATOMIC_BUFFER |
1530 BRW_NEW_BLORP |
1531 BRW_NEW_BATCH |
1532 BRW_NEW_FS_PROG_DATA,
1533 },
1534 .emit = brw_upload_wm_abo_surfaces,
1535 };
1536
1537 static void
1538 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1539 {
1540 /* _NEW_PROGRAM */
1541 const struct gl_program *cp = brw->compute_program;
1542
1543 if (cp) {
1544 /* BRW_NEW_CS_PROG_DATA */
1545 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1546 }
1547 }
1548
1549 const struct brw_tracked_state brw_cs_abo_surfaces = {
1550 .dirty = {
1551 .mesa = _NEW_PROGRAM,
1552 .brw = BRW_NEW_ATOMIC_BUFFER |
1553 BRW_NEW_BLORP |
1554 BRW_NEW_BATCH |
1555 BRW_NEW_CS_PROG_DATA,
1556 },
1557 .emit = brw_upload_cs_abo_surfaces,
1558 };
1559
1560 static void
1561 brw_upload_cs_image_surfaces(struct brw_context *brw)
1562 {
1563 /* _NEW_PROGRAM */
1564 const struct gl_program *cp = brw->compute_program;
1565
1566 if (cp) {
1567 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1568 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1569 brw->cs.base.prog_data);
1570 }
1571 }
1572
1573 const struct brw_tracked_state brw_cs_image_surfaces = {
1574 .dirty = {
1575 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1576 .brw = BRW_NEW_BATCH |
1577 BRW_NEW_BLORP |
1578 BRW_NEW_CS_PROG_DATA |
1579 BRW_NEW_IMAGE_UNITS
1580 },
1581 .emit = brw_upload_cs_image_surfaces,
1582 };
1583
1584 static uint32_t
1585 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1586 {
1587 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1588 uint32_t hw_format = brw_format_for_mesa_format(format);
1589 if (access == GL_WRITE_ONLY) {
1590 return hw_format;
1591 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1592 /* Typed surface reads support a very limited subset of the shader
1593 * image formats. Translate it into the closest format the
1594 * hardware supports.
1595 */
1596 return isl_lower_storage_image_format(devinfo, hw_format);
1597 } else {
1598 /* The hardware doesn't actually support a typed format that we can use
1599 * so we have to fall back to untyped read/write messages.
1600 */
1601 return BRW_SURFACEFORMAT_RAW;
1602 }
1603 }
1604
1605 static void
1606 update_default_image_param(struct brw_context *brw,
1607 struct gl_image_unit *u,
1608 unsigned surface_idx,
1609 struct brw_image_param *param)
1610 {
1611 memset(param, 0, sizeof(*param));
1612 param->surface_idx = surface_idx;
1613 /* Set the swizzling shifts to all-ones to effectively disable swizzling.
1614 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1615 * detailed explanation of these parameters.
1616 */
1617 param->swizzling[0] = 0xff;
1618 param->swizzling[1] = 0xff;
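/* Note: the real shift values (3 and/or 4, folding address bits 9/10 into
 * bit 6) are filled in later by update_texture_image_param() when the
 * underlying BO actually uses bit-6 address swizzling.
 */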
1619 }
1620
1621 static void
1622 update_buffer_image_param(struct brw_context *brw,
1623 struct gl_image_unit *u,
1624 unsigned surface_idx,
1625 struct brw_image_param *param)
1626 {
1627 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1628 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1629 update_default_image_param(brw, u, surface_idx, param);
1630
1631 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1632 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1633 }
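/* Worked example (purely illustrative numbers): for a buffer image whose
 * _ActualFormat is 16 bytes per texel (e.g. RGBA32F) bound to a 4096-byte
 * range, size[0] ends up as 4096 / 16 = 256 addressable texels and
 * stride[0] as 16 bytes.
 */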
1634
1635 static void
1636 update_texture_image_param(struct brw_context *brw,
1637 struct gl_image_unit *u,
1638 unsigned surface_idx,
1639 struct brw_image_param *param)
1640 {
1641 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1642
1643 update_default_image_param(brw, u, surface_idx, param);
1644
1645 param->size[0] = minify(mt->logical_width0, u->Level);
1646 param->size[1] = minify(mt->logical_height0, u->Level);
1647 param->size[2] = (!u->Layered ? 1 :
1648 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1649 u->TexObj->Target == GL_TEXTURE_3D ?
1650 minify(mt->logical_depth0, u->Level) :
1651 mt->logical_depth0);
1652
1653 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1654 &param->offset[0],
1655 &param->offset[1]);
1656
1657 param->stride[0] = mt->cpp;
1658 param->stride[1] = mt->pitch / mt->cpp;
1659 param->stride[2] =
1660 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1661 param->stride[3] =
1662 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1663
1664 if (mt->tiling == I915_TILING_X) {
1665 /* An X tile is a rectangular block of 512x8 bytes. */
1666 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1667 param->tiling[1] = _mesa_logbase2(8);
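/* Illustrative arithmetic only: for a 4-byte-per-pixel format this gives
 * tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, i.e. a
 * 128x8-pixel X-tile footprint.
 */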
1668
1669 if (brw->has_swizzling) {
1670 /* Right shifts required to swizzle bits 9 and 10 of the memory
1671 * address with bit 6.
1672 */
1673 param->swizzling[0] = 3;
1674 param->swizzling[1] = 4;
1675 }
1676 } else if (mt->tiling == I915_TILING_Y) {
1677 /* The layout of a Y-tiled surface in memory isn't fundamentally
1678 * different from the layout of an X-tiled surface: we simply pretend
1679 * that the surface is broken up into a number of smaller 16Bx32 tiles,
1680 * each one arranged in X-major order, just as is the case for X-tiling.
1681 */
1682 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1683 param->tiling[1] = _mesa_logbase2(32);
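/* Illustrative arithmetic only: for a 4-byte-per-pixel format this gives
 * tiling[0] = log2(16 / 4) = 2 and tiling[1] = log2(32) = 5, i.e.
 * 4x32-pixel sub-tiles.
 */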
1684
1685 if (brw->has_swizzling) {
1686 /* Right shift required to swizzle bit 9 of the memory address with
1687 * bit 6.
1688 */
1689 param->swizzling[0] = 3;
1690 }
1691 }
1692
1693 /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
1694 * address calculation algorithm (emit_address_calculation() in
1695 * brw_fs_surface_builder.cpp) handles this as a kind of tiling whose
1696 * modulus is 2^lod, which is why tiling[2] stores the LOD itself.
1697 */
1698 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1699 0);
1700 }
1701
1702 static void
1703 update_image_surface(struct brw_context *brw,
1704 struct gl_image_unit *u,
1705 GLenum access,
1706 unsigned surface_idx,
1707 uint32_t *surf_offset,
1708 struct brw_image_param *param)
1709 {
1710 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1711 struct gl_texture_object *obj = u->TexObj;
1712 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1713
1714 if (obj->Target == GL_TEXTURE_BUFFER) {
1715 struct intel_buffer_object *intel_obj =
1716 intel_buffer_object(obj->BufferObject);
1717 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1718 _mesa_get_format_bytes(u->_ActualFormat));
1719
1720 brw_emit_buffer_surface_state(
1721 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1722 format, intel_obj->Base.Size, texel_size,
1723 access != GL_READ_ONLY);
1724
1725 update_buffer_image_param(brw, u, surface_idx, param);
1726
1727 } else {
1728 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1729 struct intel_mipmap_tree *mt = intel_obj->mt;
1730
1731 if (format == BRW_SURFACEFORMAT_RAW) {
1732 brw_emit_buffer_surface_state(
1733 brw, surf_offset, mt->bo, mt->offset,
1734 format, mt->bo->size - mt->offset, 1 /* pitch */,
1735 access != GL_READ_ONLY);
1736
1737 } else {
1738 const unsigned num_layers = (!u->Layered ? 1 :
1739 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1740 mt->logical_depth0);
1741
1742 struct isl_view view = {
1743 .format = format,
1744 .base_level = obj->MinLevel + u->Level,
1745 .levels = 1,
1746 .base_array_layer = obj->MinLayer + u->_Layer,
1747 .array_len = num_layers,
1748 .swizzle = ISL_SWIZZLE_IDENTITY,
1749 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1750 };
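/* MinLevel/MinLayer account for ARB_texture_view: the image unit's Level
 * and _Layer are relative to the texture view, so they are offset by the
 * view's base level/layer to address the underlying miptree.
 */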
1751
1752 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1753 const bool unresolved = intel_miptree_has_color_unresolved(
1754 mt, view.base_level, view.levels,
1755 view.base_array_layer, view.array_len);
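/* If nothing in the accessed range is unresolved, the surface is emitted
 * with the auxiliary (CCS) buffer disabled; presumably the data-port image
 * messages do not understand color compression, so a fully resolved
 * miptree must be accessed without aux.
 */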
1756 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1757 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1758 tex_mocs[brw->gen],
1759 surf_offset, surf_index,
1760 I915_GEM_DOMAIN_SAMPLER,
1761 access == GL_READ_ONLY ? 0 :
1762 I915_GEM_DOMAIN_SAMPLER);
1763 }
1764
1765 update_texture_image_param(brw, u, surface_idx, param);
1766 }
1767
1768 } else {
1769 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1770 update_default_image_param(brw, u, surface_idx, param);
1771 }
1772 }
1773
1774 void
1775 brw_upload_image_surfaces(struct brw_context *brw,
1776 const struct gl_program *prog,
1777 struct brw_stage_state *stage_state,
1778 struct brw_stage_prog_data *prog_data)
1779 {
1780 assert(prog);
1781 struct gl_context *ctx = &brw->ctx;
1782
1783 if (prog->info.num_images) {
1784 for (unsigned i = 0; i < prog->info.num_images; i++) {
1785 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1786 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1787
1788 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1789 surf_idx,
1790 &stage_state->surf_offset[surf_idx],
1791 &prog_data->image_param[i]);
1792 }
1793
1794 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1795 /* This may have changed the image metadata that depends on the context
1796 * image unit state and is passed to the program as uniforms, so make
1797 * sure that push and pull constants are re-uploaded.
1798 */
1799 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1800 }
1801 }
1802
1803 static void
1804 brw_upload_wm_image_surfaces(struct brw_context *brw)
1805 {
1806 /* BRW_NEW_FRAGMENT_PROGRAM */
1807 const struct gl_program *wm = brw->fragment_program;
1808
1809 if (wm) {
1810 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1811 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1812 brw->wm.base.prog_data);
1813 }
1814 }
1815
1816 const struct brw_tracked_state brw_wm_image_surfaces = {
1817 .dirty = {
1818 .mesa = _NEW_TEXTURE,
1819 .brw = BRW_NEW_BATCH |
1820 BRW_NEW_BLORP |
1821 BRW_NEW_FRAGMENT_PROGRAM |
1822 BRW_NEW_FS_PROG_DATA |
1823 BRW_NEW_IMAGE_UNITS
1824 },
1825 .emit = brw_upload_wm_image_surfaces,
1826 };
1827
1828 void
1829 gen4_init_vtable_surface_functions(struct brw_context *brw)
1830 {
1831 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1832 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1833 }
1834
1835 void
1836 gen6_init_vtable_surface_functions(struct brw_context *brw)
1837 {
1838 gen4_init_vtable_surface_functions(brw);
1839 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1840 }
1841
1842 static void
1843 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1844 {
1845 struct gl_context *ctx = &brw->ctx;
1846 /* _NEW_PROGRAM */
1847 struct gl_shader_program *prog =
1848 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1849 /* BRW_NEW_CS_PROG_DATA */
1850 const struct brw_cs_prog_data *cs_prog_data =
1851 brw_cs_prog_data(brw->cs.base.prog_data);
1852
1853 if (prog && cs_prog_data->uses_num_work_groups) {
1854 const unsigned surf_idx =
1855 cs_prog_data->binding_table.work_groups_start;
1856 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1857 drm_intel_bo *bo;
1858 uint32_t bo_offset;
1859
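/* Two sources for the work-group count: for indirect dispatches
 * (presumably glDispatchComputeIndirect) num_work_groups_bo already points
 * at the application's indirect parameter buffer, while for direct
 * dispatches the three CPU-side GLuints are uploaded to a scratch upload
 * buffer here.
 */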
1860 if (brw->compute.num_work_groups_bo == NULL) {
1861 bo = NULL;
1862 intel_upload_data(brw,
1863 (void *)brw->compute.num_work_groups,
1864 3 * sizeof(GLuint),
1865 sizeof(GLuint),
1866 &bo,
1867 &bo_offset);
1868 } else {
1869 bo = brw->compute.num_work_groups_bo;
1870 bo_offset = brw->compute.num_work_groups_offset;
1871 }
1872
1873 brw_emit_buffer_surface_state(brw, surf_offset,
1874 bo, bo_offset,
1875 BRW_SURFACEFORMAT_RAW,
1876 3 * sizeof(GLuint), 1, true);
1877 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1878 }
1879 }
1880
1881 const struct brw_tracked_state brw_cs_work_groups_surface = {
1882 .dirty = {
1883 .brw = BRW_NEW_BLORP |
1884 BRW_NEW_CS_PROG_DATA |
1885 BRW_NEW_CS_WORK_GROUPS
1886 },
1887 .emit = brw_upload_cs_work_groups_surface,
1888 };
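/* Minimal sketch (not part of this file) of how brw_tracked_state atoms
 * such as the ones defined above are typically consumed by the state
 * upload loop; the loop shape, helper and array names here are assumptions
 * and differ between Mesa versions:
 *
 *    for (unsigned i = 0; i < num_atoms; i++) {
 *       const struct brw_tracked_state *atom = &atoms[i];
 *       if ((state.mesa & atom->dirty.mesa) || (state.brw & atom->dirty.brw))
 *          atom->emit(brw);
 *    }
 */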
1889