/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Code generation for the whole fragment pipeline.
 *
 * The fragment pipeline consists of the following stages:
 * - early depth test
 * - fragment shader
 * - alpha test
 * - depth/stencil test
 * - blending
 *
 * This file has only the glue to assemble the fragment pipeline.  The actual
 * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
 * lp_bld_*.[ch] files, and in a completely generic and reusable way.  Here we
 * muster the LLVM JIT execution engine to create a function that follows an
 * established binary interface and that can be called from C directly.
 *
 * A big source of complexity here is that we often want to run different
 * stages with different precisions and data types.  For example, the
 * fragment shader typically needs to be done in floats, but the
 * depth/stencil test and blending are better done in the type that most
 * closely matches the depth/stencil and color buffer respectively.
 *
 * Since the width of a SIMD vector register stays the same regardless of the
 * element type, different types imply a different number of elements, so we
 * must code generate more instances of the stages with larger types to be
 * able to feed/consume the stages with smaller types.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
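
/*
 * For example: with 128-bit SIMD a shader vector holds 4 x f32 values,
 * while an 8-bit unorm color buffer packs 16 x u8 into the same register
 * width, so one blending iteration consumes the results of four
 * shader-type vectors.
 */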

#include <limits.h>
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pointer.h"
#include "util/format/u_format.h"
#include "util/u_dump.h"
#include "util/u_string.h"
#include "util/simple_list.h"
#include "util/u_dual_blend.h"
#include "util/os_time.h"
#include "pipe/p_shader_tokens.h"
#include "draw/draw_context.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_parse.h"
#include "gallivm/lp_bld_type.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_conv.h"
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_nir.h"
#include "gallivm/lp_bld_swizzle.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_debug.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_bitarit.h"
#include "gallivm/lp_bld_pack.h"
#include "gallivm/lp_bld_format.h"
#include "gallivm/lp_bld_quad.h"
#include "gallivm/lp_bld_gather.h"

#include "lp_bld_alpha.h"
#include "lp_bld_blend.h"
#include "lp_bld_depth.h"
#include "lp_bld_interp.h"
#include "lp_context.h"
#include "lp_debug.h"
#include "lp_perf.h"
#include "lp_setup.h"
#include "lp_state.h"
#include "lp_tex_sample.h"
#include "lp_flush.h"
#include "lp_state_fs.h"
#include "lp_rast.h"
#include "nir/nir_to_tgsi_info.h"

#include "lp_screen.h"
#include "compiler/nir/nir_serialize.h"
#include "util/mesa-sha1.h"

/** Fragment shader number (for debugging) */
static unsigned fs_no = 0;

static void
load_unswizzled_block(struct gallivm_state *gallivm,
                      LLVMValueRef base_ptr,
                      LLVMValueRef stride,
                      unsigned block_width,
                      unsigned block_height,
                      LLVMValueRef* dst,
                      struct lp_type dst_type,
                      unsigned dst_count,
                      unsigned dst_alignment,
                      LLVMValueRef x_offset,
                      LLVMValueRef y_offset,
                      bool fb_fetch_twiddle);

/**
 * Checks if a format description is an arithmetic format
 *
 * An arithmetic format has irregular channel sizes, such as R3_G3_B2 or
 * R5_G6_B5.
 */
static inline boolean
is_arithmetic_format(const struct util_format_description *format_desc)
{
   boolean arith = false;
   unsigned i;

   for (i = 0; i < format_desc->nr_channels; ++i) {
      arith |= format_desc->channel[i].size != format_desc->channel[0].size;
      arith |= (format_desc->channel[i].size % 8) != 0;
   }

   return arith;
}
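
/*
 * For example: R3_G3_B2 has channel sizes {3, 3, 2} and R5_G6_B5 has
 * {5, 6, 5}; neither is byte-aligned, so both are arithmetic.  R8G8B8A8
 * has four uniform byte-sized channels and is not.
 */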

/**
 * Checks if this format requires special handling due to required expansion
 * to floats for blending, and furthermore has "natural" packed AoS -> unpacked
 * SoA conversion.
 */
static inline boolean
format_expands_to_float_soa(const struct util_format_description *format_desc)
{
   if (format_desc->format == PIPE_FORMAT_R11G11B10_FLOAT ||
       format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
      return true;
   }
   return false;
}


/**
 * Retrieves the type representing the memory layout for a format
 *
 * e.g. RGBA16F = 4x half-float and R3G3B2 = 1x byte
 */
static inline void
lp_mem_type_from_format_desc(const struct util_format_description *format_desc,
                             struct lp_type* type)
{
   unsigned i;
   unsigned chan;

   if (format_expands_to_float_soa(format_desc)) {
      /* just make this a uint with width of block */
      type->floating = false;
      type->fixed = false;
      type->sign = false;
      type->norm = false;
      type->width = format_desc->block.bits;
      type->length = 1;
      return;
   }

   for (i = 0; i < 4; i++)
      if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
         break;
   chan = i;

   memset(type, 0, sizeof(struct lp_type));
   type->floating = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FLOAT;
   type->fixed    = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FIXED;
   type->sign     = format_desc->channel[chan].type != UTIL_FORMAT_TYPE_UNSIGNED;
   type->norm     = format_desc->channel[chan].normalized;

   if (is_arithmetic_format(format_desc)) {
      type->width = 0;
      type->length = 1;

      for (i = 0; i < format_desc->nr_channels; ++i) {
         type->width += format_desc->channel[i].size;
      }
   } else {
      type->width = format_desc->channel[chan].size;
      type->length = format_desc->nr_channels;
   }
}
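
/*
 * Worked examples for lp_mem_type_from_format_desc():
 *   PIPE_FORMAT_R16G16B16A16_FLOAT -> floating, width 16, length 4
 *   PIPE_FORMAT_B5G6R5_UNORM       -> norm, width 16 (= 5+6+5), length 1
 *   PIPE_FORMAT_R11G11B10_FLOAT    -> plain uint, width 32, length 1
 */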

/**
 * Expand the relevant bits of mask_input to an n*4-dword mask for the
 * n*four pixels in n 2x2 quads.  This will set the n*four elements of the
 * quad mask vector to 0 or ~0.
 * Grouping is 01, 23 for 2-quad mode, hence only 0 and 2 are valid
 * first_quad arguments with fs length 8.
 *
 * \param first_quad  which quad(s) of the quad group to test, in [0,3]
 * \param mask_input  bitwise mask for the whole 4x4 stamp
 */
static LLVMValueRef
generate_quad_mask(struct gallivm_state *gallivm,
                   struct lp_type fs_type,
                   unsigned first_quad,
                   unsigned sample,
                   LLVMValueRef mask_input) /* int64 */
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type mask_type;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
   LLVMValueRef bits[16];
   LLVMValueRef mask, bits_vec;
   int shift, i;

   /*
    * XXX: We'll need a different path for 16 x u8
    */
   assert(fs_type.width == 32);
   assert(fs_type.length <= ARRAY_SIZE(bits));
   mask_type = lp_int_type(fs_type);

   /*
    * mask_input >>= (quad * 4)
    */
   switch (first_quad) {
   case 0:
      shift = 0;
      break;
   case 1:
      assert(fs_type.length == 4);
      shift = 2;
      break;
   case 2:
      shift = 8;
      break;
   case 3:
      assert(fs_type.length == 4);
      shift = 10;
      break;
   default:
      assert(0);
      shift = 0;
   }

   mask_input = LLVMBuildLShr(builder, mask_input, lp_build_const_int64(gallivm, 16 * sample), "");
   mask_input = LLVMBuildTrunc(builder, mask_input,
                               i32t, "");
   mask_input = LLVMBuildAnd(builder, mask_input, lp_build_const_int32(gallivm, 0xffff), "");

   mask_input = LLVMBuildLShr(builder,
                              mask_input,
                              LLVMConstInt(i32t, shift, 0),
                              "");

   /*
    * mask = { mask_input & (1 << i), for i in [0,3] }
    */
   mask = lp_build_broadcast(gallivm,
                             lp_build_vec_type(gallivm, mask_type),
                             mask_input);

   for (i = 0; i < fs_type.length / 4; i++) {
      unsigned j = 2 * (i % 2) + (i / 2) * 8;
      bits[4*i + 0] = LLVMConstInt(i32t, 1ULL << (j + 0), 0);
      bits[4*i + 1] = LLVMConstInt(i32t, 1ULL << (j + 1), 0);
      bits[4*i + 2] = LLVMConstInt(i32t, 1ULL << (j + 4), 0);
      bits[4*i + 3] = LLVMConstInt(i32t, 1ULL << (j + 5), 0);
   }
   bits_vec = LLVMConstVector(bits, fs_type.length);
   mask = LLVMBuildAnd(builder, mask, bits_vec, "");

   /*
    * mask = mask == bits ? ~0 : 0
    */
   mask = lp_build_compare(gallivm,
                           mask_type, PIPE_FUNC_EQUAL,
                           mask, bits_vec);

   return mask;
}
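
/*
 * Bit layout note: bit (y*4 + x) of the per-sample 16-bit mask covers
 * pixel (x, y) of the 4x4 stamp, so the constants above select bits
 * {j, j+1, j+4, j+5}: one 2x2 quad per group of four mask elements.
 */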


#define EARLY_DEPTH_TEST  0x1
#define LATE_DEPTH_TEST   0x2
#define EARLY_DEPTH_WRITE 0x4
#define LATE_DEPTH_WRITE  0x8

static int
find_output_by_semantic(const struct tgsi_shader_info *info,
                        unsigned semantic,
                        unsigned index)
{
   int i;

   for (i = 0; i < info->num_outputs; i++)
      if (info->output_semantic_name[i] == semantic &&
          info->output_semantic_index[i] == index)
         return i;

   return -1;
}


/**
 * Fetch the specified lp_jit_viewport structure for a given viewport_index.
 */
static LLVMValueRef
lp_llvm_viewport(LLVMValueRef context_ptr,
                 struct gallivm_state *gallivm,
                 LLVMValueRef viewport_index)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef ptr;
   LLVMValueRef res;
   struct lp_type viewport_type =
      lp_type_float_vec(32, 32 * LP_JIT_VIEWPORT_NUM_FIELDS);

   ptr = lp_jit_context_viewports(gallivm, context_ptr);
   ptr = LLVMBuildPointerCast(builder, ptr,
            LLVMPointerType(lp_build_vec_type(gallivm, viewport_type), 0), "");

   res = lp_build_pointer_get(builder, ptr, viewport_index);

   return res;
}


static LLVMValueRef
lp_build_depth_clamp(struct gallivm_state *gallivm,
                     LLVMBuilderRef builder,
                     struct lp_type type,
                     LLVMValueRef context_ptr,
                     LLVMValueRef thread_data_ptr,
                     LLVMValueRef z)
{
   LLVMValueRef viewport, min_depth, max_depth;
   LLVMValueRef viewport_index;
   struct lp_build_context f32_bld;

   assert(type.floating);
   lp_build_context_init(&f32_bld, gallivm, type);

   /*
    * Assumes clamping of the viewport index will occur in setup/gs. Value
    * is passed through the rasterization stage via lp_rast_shader_inputs.
    *
    * See: draw_clamp_viewport_idx and lp_clamp_viewport_idx for clamping
    *      semantics.
    */
   viewport_index = lp_jit_thread_data_raster_state_viewport_index(gallivm,
                       thread_data_ptr);

   /*
    * Load the min and max depth from the lp_jit_context.viewports
    * array of lp_jit_viewport structures.
    */
   viewport = lp_llvm_viewport(context_ptr, gallivm, viewport_index);

   /* viewports[viewport_index].min_depth */
   min_depth = LLVMBuildExtractElement(builder, viewport,
                  lp_build_const_int32(gallivm, LP_JIT_VIEWPORT_MIN_DEPTH), "");
   min_depth = lp_build_broadcast_scalar(&f32_bld, min_depth);

   /* viewports[viewport_index].max_depth */
   max_depth = LLVMBuildExtractElement(builder, viewport,
                  lp_build_const_int32(gallivm, LP_JIT_VIEWPORT_MAX_DEPTH), "");
   max_depth = lp_build_broadcast_scalar(&f32_bld, max_depth);

   /*
    * Clamp to the min and max depth values for the given viewport.
    */
   return lp_build_clamp(&f32_bld, z, min_depth, max_depth);
}
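
/*
 * Note: min_depth/max_depth come from the per-viewport depth range state
 * (glDepthRange in GL terms), so with the default range z is clamped to
 * [0.0, 1.0].
 */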

static void
lp_build_sample_alpha_to_coverage(struct gallivm_state *gallivm,
                                  struct lp_type type,
                                  unsigned coverage_samples,
                                  LLVMValueRef num_loop,
                                  LLVMValueRef loop_counter,
                                  LLVMValueRef coverage_mask_store,
                                  LLVMValueRef alpha)
{
   struct lp_build_context bld;
   LLVMBuilderRef builder = gallivm->builder;
   float step = 1.0 / coverage_samples;

   lp_build_context_init(&bld, gallivm, type);
   for (unsigned s = 0; s < coverage_samples; s++) {
      LLVMValueRef alpha_ref_value = lp_build_const_vec(gallivm, type, step * s);
      LLVMValueRef test = lp_build_cmp(&bld, PIPE_FUNC_GREATER, alpha, alpha_ref_value);

      LLVMValueRef s_mask_idx = LLVMBuildMul(builder, lp_build_const_int32(gallivm, s), num_loop, "");
      s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_counter, "");
      LLVMValueRef s_mask_ptr = LLVMBuildGEP(builder, coverage_mask_store, &s_mask_idx, 1, "");
      LLVMValueRef s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
      s_mask = LLVMBuildAnd(builder, s_mask, test, "");
      LLVMBuildStore(builder, s_mask, s_mask_ptr);
   }
}
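
/*
 * For example, with 4 coverage samples the reference values are 0, 0.25,
 * 0.5 and 0.75: an alpha of 0.6 keeps samples 0-2 and drops sample 3,
 * dithering alpha into per-sample coverage.
 */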

struct lp_build_fs_llvm_iface {
   struct lp_build_fs_iface base;
   struct lp_build_interp_soa_context *interp;
   struct lp_build_for_loop_state *loop_state;
   LLVMValueRef mask_store;
   LLVMValueRef sample_id;
   LLVMValueRef color_ptr_ptr;
   LLVMValueRef color_stride_ptr;
   LLVMValueRef color_sample_stride_ptr;
   const struct lp_fragment_shader_variant_key *key;
};

static LLVMValueRef fs_interp(const struct lp_build_fs_iface *iface,
                              struct lp_build_context *bld,
                              unsigned attrib, unsigned chan,
                              bool centroid, bool sample,
                              LLVMValueRef attrib_indir,
                              LLVMValueRef offsets[2])
{
   struct lp_build_fs_llvm_iface *fs_iface = (struct lp_build_fs_llvm_iface *)iface;
   struct lp_build_interp_soa_context *interp = fs_iface->interp;
   unsigned loc = TGSI_INTERPOLATE_LOC_CENTER;
   if (centroid)
      loc = TGSI_INTERPOLATE_LOC_CENTROID;
   if (sample)
      loc = TGSI_INTERPOLATE_LOC_SAMPLE;

   return lp_build_interp_soa(interp, bld->gallivm, fs_iface->loop_state->counter,
                              fs_iface->mask_store,
                              attrib, chan, loc, attrib_indir, offsets);
}

static void fs_fb_fetch(const struct lp_build_fs_iface *iface,
                        struct lp_build_context *bld,
                        unsigned cbuf,
                        LLVMValueRef result[4])
{
   struct lp_build_fs_llvm_iface *fs_iface = (struct lp_build_fs_llvm_iface *)iface;
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   const struct lp_fragment_shader_variant_key *key = fs_iface->key;
   LLVMValueRef index = lp_build_const_int32(gallivm, cbuf);
   LLVMValueRef color_ptr = LLVMBuildLoad(builder, LLVMBuildGEP(builder, fs_iface->color_ptr_ptr, &index, 1, ""), "");
   LLVMValueRef stride = LLVMBuildLoad(builder, LLVMBuildGEP(builder, fs_iface->color_stride_ptr, &index, 1, ""), "");

   LLVMValueRef dst[4 * 4];
   enum pipe_format cbuf_format = key->cbuf_format[cbuf];
   const struct util_format_description* out_format_desc = util_format_description(cbuf_format);
   struct lp_type dst_type;
   unsigned block_size = bld->type.length;
   unsigned block_height = key->resource_1d ? 1 : 2;
   unsigned block_width = block_size / block_height;

   lp_mem_type_from_format_desc(out_format_desc, &dst_type);

   struct lp_type blend_type;
   memset(&blend_type, 0, sizeof blend_type);
   blend_type.floating = FALSE; /* values are integers */
   blend_type.sign = FALSE;     /* values are unsigned */
   blend_type.norm = TRUE;      /* values are in [0,1] or [-1,1] */
   blend_type.width = 8;        /* 8-bit ubyte values */
   blend_type.length = 16;      /* 16 elements per vector */

   uint32_t dst_alignment;
   /*
    * Compute the alignment of the destination pointer in bytes.
    * We fetch 1-4 pixels; if the format has power-of-two alignment then
    * those fetches are always aligned by MIN2(16, fetch_width), except for
    * buffers (which can't be distinguished from 1d textures here), so we
    * need to stick with per-pixel alignment in that case.
    */
   if (key->resource_1d) {
      dst_alignment = (out_format_desc->block.bits + 7)/(out_format_desc->block.width * 8);
   }
   else {
      dst_alignment = dst_type.length * dst_type.width / 8;
   }
   /* Force power-of-two alignment by extracting only the least-significant-bit */
   dst_alignment = 1 << (ffs(dst_alignment) - 1);
   /*
    * Resource base and stride pointers are aligned to 16 bytes, so that's
    * the maximum alignment we can guarantee
    */
   dst_alignment = MIN2(16, dst_alignment);

   LLVMTypeRef blend_vec_type = lp_build_vec_type(gallivm, blend_type);
   color_ptr = LLVMBuildBitCast(builder, color_ptr, LLVMPointerType(blend_vec_type, 0), "");

   if (key->multisample) {
      LLVMValueRef sample_stride = LLVMBuildLoad(builder,
                                                 LLVMBuildGEP(builder, fs_iface->color_sample_stride_ptr,
                                                              &index, 1, ""), "");
      LLVMValueRef sample_offset = LLVMBuildMul(builder, sample_stride, fs_iface->sample_id, "");
      color_ptr = LLVMBuildGEP(builder, color_ptr, &sample_offset, 1, "");
   }

   /*
    * The fragment shader executes on 4x4 blocks; depending on the vector
    * width it takes 2 iterations (8-wide) or 4 iterations (4-wide) to cover
    * a block.  Only move to the next row once the top row has completed:
    * after 1 iteration when 8-wide, after 2 iterations when 4-wide.
    */
   LLVMValueRef x_offset = NULL, y_offset = NULL;
   if (!key->resource_1d) {
      LLVMValueRef counter = fs_iface->loop_state->counter;

      if (block_size == 4) {
         x_offset = LLVMBuildShl(builder,
                                 LLVMBuildAnd(builder, fs_iface->loop_state->counter, lp_build_const_int32(gallivm, 1), ""),
                                 lp_build_const_int32(gallivm, 1), "");
         counter = LLVMBuildLShr(builder, fs_iface->loop_state->counter, lp_build_const_int32(gallivm, 1), "");
      }
      y_offset = LLVMBuildMul(builder, counter, lp_build_const_int32(gallivm, 2), "");
   }
   load_unswizzled_block(gallivm, color_ptr, stride, block_width, block_height, dst, dst_type, block_size, dst_alignment, x_offset, y_offset, true);

   for (unsigned i = 0; i < block_size; i++) {
      dst[i] = LLVMBuildBitCast(builder, dst[i], LLVMInt32TypeInContext(gallivm->context), "");
   }
   LLVMValueRef packed = lp_build_gather_values(gallivm, dst, block_size);

   struct lp_type texel_type = bld->type;
   if (out_format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB &&
       out_format_desc->channel[0].pure_integer) {
      if (out_format_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
         texel_type = lp_type_int_vec(bld->type.width, bld->type.width * bld->type.length);
      }
      else if (out_format_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED) {
         texel_type = lp_type_uint_vec(bld->type.width, bld->type.width * bld->type.length);
      }
   }
   lp_build_unpack_rgba_soa(gallivm, out_format_desc,
                            texel_type,
                            packed, result);
}
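
/*
 * Summary of fs_fb_fetch(): load the current 4x4 render-target block for
 * color buffer 'cbuf' as packed AoS pixels, gather them into one vector,
 * and unpack to SoA in the shader's own type so a shader using framebuffer
 * fetch can read the destination color.
 */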

/**
 * Generate the fragment shader, depth/stencil test, and alpha tests.
 */
static void
generate_fs_loop(struct gallivm_state *gallivm,
                 struct lp_fragment_shader *shader,
                 const struct lp_fragment_shader_variant_key *key,
                 LLVMBuilderRef builder,
                 struct lp_type type,
                 LLVMValueRef context_ptr,
                 LLVMValueRef sample_pos_array,
                 LLVMValueRef num_loop,
                 struct lp_build_interp_soa_context *interp,
                 const struct lp_build_sampler_soa *sampler,
                 const struct lp_build_image_soa *image,
                 LLVMValueRef mask_store,
                 LLVMValueRef (*out_color)[4],
                 LLVMValueRef depth_base_ptr,
                 LLVMValueRef depth_stride,
                 LLVMValueRef depth_sample_stride,
                 LLVMValueRef color_ptr_ptr,
                 LLVMValueRef color_stride_ptr,
                 LLVMValueRef color_sample_stride_ptr,
                 LLVMValueRef facing,
                 LLVMValueRef thread_data_ptr)
{
   const struct util_format_description *zs_format_desc = NULL;
   const struct tgsi_token *tokens = shader->base.tokens;
   struct lp_type int_type = lp_int_type(type);
   LLVMTypeRef vec_type, int_vec_type;
   LLVMValueRef mask_ptr = NULL, mask_val = NULL;
   LLVMValueRef consts_ptr, num_consts_ptr;
   LLVMValueRef ssbo_ptr, num_ssbo_ptr;
   LLVMValueRef z;
   LLVMValueRef z_value, s_value;
   LLVMValueRef z_fb, s_fb;
   LLVMValueRef depth_ptr;
   LLVMValueRef stencil_refs[2];
   LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][TGSI_NUM_CHANNELS];
   LLVMValueRef zs_samples = lp_build_const_int32(gallivm, key->zsbuf_nr_samples);
   struct lp_build_for_loop_state loop_state, sample_loop_state;
   struct lp_build_mask_context mask;
   /*
    * TODO: figure out if simple_shader optimization is really worthwhile to
    * keep. Disabled because it may hide some real bugs in the (depth/stencil)
    * code since tests tend to take another codepath than real shaders.
    */
   boolean simple_shader = (shader->info.base.file_count[TGSI_FILE_SAMPLER] == 0 &&
                            shader->info.base.num_inputs < 3 &&
                            shader->info.base.num_instructions < 8) && 0;
   const boolean dual_source_blend = key->blend.rt[0].blend_enable &&
                                     util_blend_state_is_dual(&key->blend, 0);
   const bool post_depth_coverage = shader->info.base.properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE];
   unsigned attrib;
   unsigned chan;
   unsigned cbuf;
   unsigned depth_mode;

   struct lp_bld_tgsi_system_values system_values;

   memset(&system_values, 0, sizeof(system_values));

   /* truncate then sign extend. */
   system_values.front_facing = LLVMBuildTrunc(gallivm->builder, facing, LLVMInt1TypeInContext(gallivm->context), "");
   system_values.front_facing = LLVMBuildSExt(gallivm->builder, system_values.front_facing, LLVMInt32TypeInContext(gallivm->context), "");

   if (key->depth.enabled ||
       key->stencil[0].enabled) {

      zs_format_desc = util_format_description(key->zsbuf_format);
      assert(zs_format_desc);

      if (shader->info.base.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL])
         depth_mode = EARLY_DEPTH_TEST | EARLY_DEPTH_WRITE;
      else if (!shader->info.base.writes_z && !shader->info.base.writes_stencil) {
         if (shader->info.base.writes_memory)
            depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
         else if (key->alpha.enabled ||
             key->blend.alpha_to_coverage ||
             shader->info.base.uses_kill ||
             shader->info.base.writes_samplemask) {
            /* With alpha test and kill, can do the depth test early
             * and hopefully eliminate some quads.  But need to do a
             * special deferred depth write once the final mask value
             * is known. This only works though if there's either no
             * stencil test or the stencil value isn't written.
             */
            if (key->stencil[0].enabled && (key->stencil[0].writemask ||
                                            (key->stencil[1].enabled &&
                                             key->stencil[1].writemask)))
               depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
            else
               depth_mode = EARLY_DEPTH_TEST | LATE_DEPTH_WRITE;
         }
         else
            depth_mode = EARLY_DEPTH_TEST | EARLY_DEPTH_WRITE;
      }
      else {
         depth_mode = LATE_DEPTH_TEST | LATE_DEPTH_WRITE;
      }

      if (!(key->depth.enabled && key->depth.writemask) &&
          !(key->stencil[0].enabled && (key->stencil[0].writemask ||
                                        (key->stencil[1].enabled &&
                                         key->stencil[1].writemask))))
         depth_mode &= ~(LATE_DEPTH_WRITE | EARLY_DEPTH_WRITE);
   }
   else {
      depth_mode = 0;
   }
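
   /*
    * In short: depth/stencil runs early whenever the shader cannot change
    * the outcome (no Z/stencil/memory writes); alpha test, kill or sample
    * mask writes still allow an early test but defer the write until the
    * final coverage mask is known.
    */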

   vec_type = lp_build_vec_type(gallivm, type);
   int_vec_type = lp_build_vec_type(gallivm, int_type);

   stencil_refs[0] = lp_jit_context_stencil_ref_front_value(gallivm, context_ptr);
   stencil_refs[1] = lp_jit_context_stencil_ref_back_value(gallivm, context_ptr);
   /* convert scalar stencil refs into vectors */
   stencil_refs[0] = lp_build_broadcast(gallivm, int_vec_type, stencil_refs[0]);
   stencil_refs[1] = lp_build_broadcast(gallivm, int_vec_type, stencil_refs[1]);

   consts_ptr = lp_jit_context_constants(gallivm, context_ptr);
   num_consts_ptr = lp_jit_context_num_constants(gallivm, context_ptr);

   ssbo_ptr = lp_jit_context_ssbos(gallivm, context_ptr);
   num_ssbo_ptr = lp_jit_context_num_ssbos(gallivm, context_ptr);

   memset(outputs, 0, sizeof outputs);

   /* Allocate color storage for each fragment sample */
   LLVMValueRef color_store_size = num_loop;
   if (key->min_samples > 1)
      color_store_size = LLVMBuildMul(builder, num_loop, lp_build_const_int32(gallivm, key->min_samples), "");

   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
         out_color[cbuf][chan] = lp_build_array_alloca(gallivm,
                                                       lp_build_vec_type(gallivm,
                                                                         type),
                                                       color_store_size, "color");
      }
   }
   if (dual_source_blend) {
      assert(key->nr_cbufs <= 1);
      for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
         out_color[1][chan] = lp_build_array_alloca(gallivm,
                                                    lp_build_vec_type(gallivm,
                                                                      type),
                                                    color_store_size, "color1");
      }
   }
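
   /*
    * Layout note: the color store holds one vector per loop iteration,
    * times min_samples when shading per sample; entry (sample, i) lives at
    * index sample * num_loop + i (see the color write below).
    */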

   lp_build_for_loop_begin(&loop_state, gallivm,
                           lp_build_const_int32(gallivm, 0),
                           LLVMIntULT,
                           num_loop,
                           lp_build_const_int32(gallivm, 1));

   LLVMValueRef sample_mask_in;
   if (key->multisample) {
      sample_mask_in = lp_build_const_int_vec(gallivm, type, 0);
      /* create shader execution mask by combining all sample masks. */
      for (unsigned s = 0; s < key->coverage_samples; s++) {
         LLVMValueRef s_mask_idx = LLVMBuildMul(builder, num_loop, lp_build_const_int32(gallivm, s), "");
         s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
         LLVMValueRef s_mask = lp_build_pointer_get(builder, mask_store, s_mask_idx);
         if (s == 0)
            mask_val = s_mask;
         else
            mask_val = LLVMBuildOr(builder, s_mask, mask_val, "");

         LLVMValueRef mask_in = LLVMBuildAnd(builder, s_mask, lp_build_const_int_vec(gallivm, type, (1 << s)), "");
         sample_mask_in = LLVMBuildOr(builder, sample_mask_in, mask_in, "");
      }
   } else {
      sample_mask_in = lp_build_const_int_vec(gallivm, type, 1);
      mask_ptr = LLVMBuildGEP(builder, mask_store,
                              &loop_state.counter, 1, "mask_ptr");
      mask_val = LLVMBuildLoad(builder, mask_ptr, "");

      LLVMValueRef mask_in = LLVMBuildAnd(builder, mask_val, lp_build_const_int_vec(gallivm, type, 1), "");
      sample_mask_in = LLVMBuildOr(builder, sample_mask_in, mask_in, "");
   }

   /* 'mask' will control execution based on quad's pixel alive/killed state */
   lp_build_mask_begin(&mask, gallivm, type, mask_val);

   if (!(depth_mode & EARLY_DEPTH_TEST) && !simple_shader)
      lp_build_mask_check(&mask);

   /* Create storage for recombining sample masks after early Z pass. */
   LLVMValueRef s_mask_or = lp_build_alloca(gallivm, lp_build_int_vec_type(gallivm, type), "cov_mask_early_depth");
   LLVMBuildStore(builder, LLVMConstNull(lp_build_int_vec_type(gallivm, type)), s_mask_or);

   /* Create storage for post depth sample mask */
   LLVMValueRef post_depth_sample_mask_in = NULL;
   if (post_depth_coverage)
      post_depth_sample_mask_in = lp_build_alloca(gallivm, int_vec_type, "post_depth_sample_mask_in");

   LLVMValueRef s_mask = NULL, s_mask_ptr = NULL;
   LLVMValueRef z_sample_value_store = NULL, s_sample_value_store = NULL;
   LLVMValueRef z_fb_store = NULL, s_fb_store = NULL;
   LLVMTypeRef z_type = NULL, z_fb_type = NULL;

   /* Run early depth once per sample */
   if (key->multisample) {

      if (zs_format_desc) {
         struct lp_type zs_type = lp_depth_type(zs_format_desc, type.length);
         struct lp_type z_type = zs_type;
         struct lp_type s_type = zs_type;
         if (zs_format_desc->block.bits < type.width)
            z_type.width = type.width;
         if (zs_format_desc->block.bits == 8)
            s_type.width = type.width;
         else if (zs_format_desc->block.bits > 32) {
            z_type.width = z_type.width / 2;
            s_type.width = s_type.width / 2;
            s_type.floating = 0;
         }
         z_sample_value_store = lp_build_array_alloca(gallivm, lp_build_int_vec_type(gallivm, type),
                                                      zs_samples, "z_sample_store");
         s_sample_value_store = lp_build_array_alloca(gallivm, lp_build_int_vec_type(gallivm, type),
                                                      zs_samples, "s_sample_store");
         z_fb_store = lp_build_array_alloca(gallivm, lp_build_vec_type(gallivm, z_type),
                                            zs_samples, "z_fb_store");
         s_fb_store = lp_build_array_alloca(gallivm, lp_build_vec_type(gallivm, s_type),
                                            zs_samples, "s_fb_store");
      }
      lp_build_for_loop_begin(&sample_loop_state, gallivm,
                              lp_build_const_int32(gallivm, 0),
                              LLVMIntULT, lp_build_const_int32(gallivm, key->coverage_samples),
                              lp_build_const_int32(gallivm, 1));

      LLVMValueRef s_mask_idx = LLVMBuildMul(builder, sample_loop_state.counter, num_loop, "");
      s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
      s_mask_ptr = LLVMBuildGEP(builder, mask_store, &s_mask_idx, 1, "");

      s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
      s_mask = LLVMBuildAnd(builder, s_mask, mask_val, "");
   }

   /* for multisample Z needs to be interpolated at sample points for testing. */
   lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter, key->multisample ? sample_loop_state.counter : NULL);
   z = interp->pos[2];

   depth_ptr = depth_base_ptr;
   if (key->multisample) {
      LLVMValueRef sample_offset = LLVMBuildMul(builder, sample_loop_state.counter, depth_sample_stride, "");
      depth_ptr = LLVMBuildGEP(builder, depth_ptr, &sample_offset, 1, "");
   }

   if (depth_mode & EARLY_DEPTH_TEST) {
      /*
       * Clamp according to ARB_depth_clamp semantics.
       */
      if (key->depth_clamp) {
         z = lp_build_depth_clamp(gallivm, builder, type, context_ptr,
                                  thread_data_ptr, z);
      }
      lp_build_depth_stencil_load_swizzled(gallivm, type,
                                           zs_format_desc, key->resource_1d,
                                           depth_ptr, depth_stride,
                                           &z_fb, &s_fb, loop_state.counter);
      lp_build_depth_stencil_test(gallivm,
                                  &key->depth,
                                  key->stencil,
                                  type,
                                  zs_format_desc,
                                  key->multisample ? NULL : &mask,
                                  &s_mask,
                                  stencil_refs,
                                  z, z_fb, s_fb,
                                  facing,
                                  &z_value, &s_value,
                                  !simple_shader && !key->multisample);

      if (depth_mode & EARLY_DEPTH_WRITE) {
         lp_build_depth_stencil_write_swizzled(gallivm, type,
                                               zs_format_desc, key->resource_1d,
                                               NULL, NULL, NULL, loop_state.counter,
                                               depth_ptr, depth_stride,
                                               z_value, s_value);
      }
      /*
       * Note that if stencil is enabled, the mask check must come after the
       * ds write, not after the stencil test; otherwise new stencil values
       * may not get written if all fragments got killed by the depth/stencil
       * test.
       */
      if (!simple_shader && key->stencil[0].enabled && !key->multisample)
         lp_build_mask_check(&mask);

      if (key->multisample) {
         z_fb_type = LLVMTypeOf(z_fb);
         z_type = LLVMTypeOf(z_value);
         lp_build_pointer_set(builder, z_sample_value_store, sample_loop_state.counter, LLVMBuildBitCast(builder, z_value, lp_build_int_vec_type(gallivm, type), ""));
         lp_build_pointer_set(builder, s_sample_value_store, sample_loop_state.counter, LLVMBuildBitCast(builder, s_value, lp_build_int_vec_type(gallivm, type), ""));
         lp_build_pointer_set(builder, z_fb_store, sample_loop_state.counter, z_fb);
         lp_build_pointer_set(builder, s_fb_store, sample_loop_state.counter, s_fb);
      }
   }

   if (key->multisample) {
      /*
       * Store the post-early Z coverage mask.
       * Recombine the resulting coverage masks post early Z into the fragment
       * shader execution mask.
       */
      LLVMValueRef tmp_s_mask_or = LLVMBuildLoad(builder, s_mask_or, "");
      tmp_s_mask_or = LLVMBuildOr(builder, tmp_s_mask_or, s_mask, "");
      LLVMBuildStore(builder, tmp_s_mask_or, s_mask_or);

      if (post_depth_coverage) {
         LLVMValueRef mask_bit_idx = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "");
         LLVMValueRef post_depth_mask_in = LLVMBuildLoad(builder, post_depth_sample_mask_in, "");
         mask_bit_idx = LLVMBuildAnd(builder, s_mask, lp_build_broadcast(gallivm, int_vec_type, mask_bit_idx), "");
         post_depth_mask_in = LLVMBuildOr(builder, post_depth_mask_in, mask_bit_idx, "");
         LLVMBuildStore(builder, post_depth_mask_in, post_depth_sample_mask_in);
      }

      LLVMBuildStore(builder, s_mask, s_mask_ptr);

      lp_build_for_loop_end(&sample_loop_state);

      /* recombine all the coverage masks into the shader exec mask. */
      tmp_s_mask_or = LLVMBuildLoad(builder, s_mask_or, "");
      lp_build_mask_update(&mask, tmp_s_mask_or);

      if (key->min_samples == 1) {
         /* for multisample Z needs to be re-interpolated at the pixel center */
         lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter, NULL);
         z = interp->pos[2];
         lp_build_mask_update(&mask, tmp_s_mask_or);
      }
   } else {
      if (post_depth_coverage) {
         LLVMValueRef post_depth_mask_in = LLVMBuildAnd(builder, lp_build_mask_value(&mask), lp_build_const_int_vec(gallivm, type, 1), "");
         LLVMBuildStore(builder, post_depth_mask_in, post_depth_sample_mask_in);
      }
   }

   LLVMValueRef out_sample_mask_storage = NULL;
   if (shader->info.base.writes_samplemask) {
      out_sample_mask_storage = lp_build_alloca(gallivm, int_vec_type, "write_mask");
      if (key->min_samples > 1)
         LLVMBuildStore(builder, LLVMConstNull(int_vec_type), out_sample_mask_storage);
   }

   if (post_depth_coverage) {
      system_values.sample_mask_in = LLVMBuildLoad(builder, post_depth_sample_mask_in, "");
   }
   else
      system_values.sample_mask_in = sample_mask_in;

   if (key->multisample && key->min_samples > 1) {
      lp_build_for_loop_begin(&sample_loop_state, gallivm,
                              lp_build_const_int32(gallivm, 0),
                              LLVMIntULT,
                              lp_build_const_int32(gallivm, key->min_samples),
                              lp_build_const_int32(gallivm, 1));

      LLVMValueRef s_mask_idx = LLVMBuildMul(builder, sample_loop_state.counter, num_loop, "");
      s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
      s_mask_ptr = LLVMBuildGEP(builder, mask_store, &s_mask_idx, 1, "");
      s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
      lp_build_mask_force(&mask, s_mask);
      lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter, sample_loop_state.counter);
      system_values.sample_id = sample_loop_state.counter;
      system_values.sample_mask_in = LLVMBuildAnd(builder, system_values.sample_mask_in,
                                                  lp_build_broadcast(gallivm, int_vec_type,
                                                                     LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "")), "");
   } else {
      system_values.sample_id = lp_build_const_int32(gallivm, 0);
   }
   system_values.sample_pos = sample_pos_array;

   lp_build_interp_soa_update_inputs_dyn(interp, gallivm, loop_state.counter, mask_store, sample_loop_state.counter);

   struct lp_build_fs_llvm_iface fs_iface = {
     .base.interp_fn = fs_interp,
     .base.fb_fetch = fs_fb_fetch,
     .interp = interp,
     .loop_state = &loop_state,
     .sample_id = system_values.sample_id,
     .mask_store = mask_store,
     .color_ptr_ptr = color_ptr_ptr,
     .color_stride_ptr = color_stride_ptr,
     .color_sample_stride_ptr = color_sample_stride_ptr,
     .key = key,
   };

   struct lp_build_tgsi_params params;
   memset(&params, 0, sizeof(params));

   params.type = type;
   params.mask = &mask;
   params.fs_iface = &fs_iface.base;
   params.consts_ptr = consts_ptr;
   params.const_sizes_ptr = num_consts_ptr;
   params.system_values = &system_values;
   params.inputs = interp->inputs;
   params.context_ptr = context_ptr;
   params.thread_data_ptr = thread_data_ptr;
   params.sampler = sampler;
   params.info = &shader->info.base;
   params.ssbo_ptr = ssbo_ptr;
   params.ssbo_sizes_ptr = num_ssbo_ptr;
   params.image = image;

   /* Build the actual shader */
   if (shader->base.type == PIPE_SHADER_IR_TGSI)
      lp_build_tgsi_soa(gallivm, tokens, &params,
                        outputs);
   else
      lp_build_nir_soa(gallivm, shader->base.ir.nir, &params,
                       outputs);

   /* Alpha test */
   if (key->alpha.enabled) {
      int color0 = find_output_by_semantic(&shader->info.base,
                                           TGSI_SEMANTIC_COLOR,
                                           0);

      if (color0 != -1 && outputs[color0][3]) {
         const struct util_format_description *cbuf_format_desc;
         LLVMValueRef alpha = LLVMBuildLoad(builder, outputs[color0][3], "alpha");
         LLVMValueRef alpha_ref_value;

         alpha_ref_value = lp_jit_context_alpha_ref_value(gallivm, context_ptr);
         alpha_ref_value = lp_build_broadcast(gallivm, vec_type, alpha_ref_value);

         cbuf_format_desc = util_format_description(key->cbuf_format[0]);

         lp_build_alpha_test(gallivm, key->alpha.func, type, cbuf_format_desc,
                             &mask, alpha, alpha_ref_value,
                             (depth_mode & LATE_DEPTH_TEST) != 0);
      }
   }
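
   /*
    * For example, with PIPE_FUNC_LESS and a reference value of 0.5,
    * fragments whose alpha is not less than 0.5 are removed from the
    * execution mask here.
    */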

   /* Emulate Alpha to Coverage with Alpha test */
   if (key->blend.alpha_to_coverage) {
      int color0 = find_output_by_semantic(&shader->info.base,
                                           TGSI_SEMANTIC_COLOR,
                                           0);

      if (color0 != -1 && outputs[color0][3]) {
         LLVMValueRef alpha = LLVMBuildLoad(builder, outputs[color0][3], "alpha");

         if (!key->multisample) {
            lp_build_alpha_to_coverage(gallivm, type,
                                       &mask, alpha,
                                       (depth_mode & LATE_DEPTH_TEST) != 0);
         } else {
            lp_build_sample_alpha_to_coverage(gallivm, type, key->coverage_samples, num_loop,
                                              loop_state.counter,
                                              mask_store, alpha);
         }
      }
   }

   if (key->blend.alpha_to_one && key->multisample) {
      for (attrib = 0; attrib < shader->info.base.num_outputs; ++attrib) {
         unsigned cbuf = shader->info.base.output_semantic_index[attrib];
         if ((shader->info.base.output_semantic_name[attrib] == TGSI_SEMANTIC_COLOR) &&
             ((cbuf < key->nr_cbufs) || (cbuf == 1 && dual_source_blend)))
            if (outputs[cbuf][3]) {
               LLVMBuildStore(builder, lp_build_const_vec(gallivm, type, 1.0), outputs[cbuf][3]);
            }
      }
   }
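
   /*
    * Note: alpha-to-one (GL_SAMPLE_ALPHA_TO_ONE in GL terms) forces the
    * stored output alpha to 1.0 after coverage has been derived from it
    * above.
    */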
   if (shader->info.base.writes_samplemask) {
      LLVMValueRef output_smask = NULL;
      int smaski = find_output_by_semantic(&shader->info.base,
                                           TGSI_SEMANTIC_SAMPLEMASK,
                                           0);
      struct lp_build_context smask_bld;
      lp_build_context_init(&smask_bld, gallivm, int_type);

      assert(smaski >= 0);
      output_smask = LLVMBuildLoad(builder, outputs[smaski][0], "smask");
      output_smask = LLVMBuildBitCast(builder, output_smask, smask_bld.vec_type, "");
      if (!key->multisample && key->no_ms_sample_mask_out) {
         output_smask = lp_build_and(&smask_bld, output_smask, smask_bld.one);
         output_smask = lp_build_cmp(&smask_bld, PIPE_FUNC_NOTEQUAL, output_smask, smask_bld.zero);
         lp_build_mask_update(&mask, output_smask);
      }

      if (key->min_samples > 1) {
         /* only the bit corresponding to this sample is to be used. */
         LLVMValueRef tmp_mask = LLVMBuildLoad(builder, out_sample_mask_storage, "tmp_mask");
         LLVMValueRef out_smask_idx = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "");
         LLVMValueRef smask_bit = LLVMBuildAnd(builder, output_smask, lp_build_broadcast(gallivm, int_vec_type, out_smask_idx), "");
         output_smask = LLVMBuildOr(builder, tmp_mask, smask_bit, "");
      }

      LLVMBuildStore(builder, output_smask, out_sample_mask_storage);
   }

   /* Color write - per fragment sample */
   for (attrib = 0; attrib < shader->info.base.num_outputs; ++attrib)
   {
      unsigned cbuf = shader->info.base.output_semantic_index[attrib];
      if ((shader->info.base.output_semantic_name[attrib] == TGSI_SEMANTIC_COLOR) &&
           ((cbuf < key->nr_cbufs) || (cbuf == 1 && dual_source_blend)))
      {
         for(chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
            if(outputs[attrib][chan]) {
               /* XXX: just initialize outputs to point at colors[] and
                * skip this.
                */
               LLVMValueRef out = LLVMBuildLoad(builder, outputs[attrib][chan], "");
               LLVMValueRef color_ptr;
               LLVMValueRef color_idx = loop_state.counter;
               if (key->min_samples > 1)
                  color_idx = LLVMBuildAdd(builder, color_idx,
                                           LLVMBuildMul(builder, sample_loop_state.counter, num_loop, ""), "");
               color_ptr = LLVMBuildGEP(builder, out_color[cbuf][chan],
                                        &color_idx, 1, "");
               lp_build_name(out, "color%u.%c", attrib, "rgba"[chan]);
               LLVMBuildStore(builder, out, color_ptr);
            }
         }
      }
   }

   if (key->multisample && key->min_samples > 1) {
      LLVMBuildStore(builder, lp_build_mask_value(&mask), s_mask_ptr);
      lp_build_for_loop_end(&sample_loop_state);
   }

   if (key->multisample) {
      /* execute depth test for each sample */
      lp_build_for_loop_begin(&sample_loop_state, gallivm,
                              lp_build_const_int32(gallivm, 0),
                              LLVMIntULT, lp_build_const_int32(gallivm, key->coverage_samples),
                              lp_build_const_int32(gallivm, 1));

      /* load the per-sample coverage mask */
      LLVMValueRef s_mask_idx = LLVMBuildMul(builder, sample_loop_state.counter, num_loop, "");
      s_mask_idx = LLVMBuildAdd(builder, s_mask_idx, loop_state.counter, "");
      s_mask_ptr = LLVMBuildGEP(builder, mask_store, &s_mask_idx, 1, "");

      /* combine the execution mask post fragment shader with the coverage mask. */
      s_mask = LLVMBuildLoad(builder, s_mask_ptr, "");
      if (key->min_samples == 1)
         s_mask = LLVMBuildAnd(builder, s_mask, lp_build_mask_value(&mask), "");

      /* if the shader writes sample mask use that */
      if (shader->info.base.writes_samplemask) {
         LLVMValueRef out_smask_idx = LLVMBuildShl(builder, lp_build_const_int32(gallivm, 1), sample_loop_state.counter, "");
         out_smask_idx = lp_build_broadcast(gallivm, int_vec_type, out_smask_idx);
         LLVMValueRef output_smask = LLVMBuildLoad(builder, out_sample_mask_storage, "");
         LLVMValueRef smask_bit = LLVMBuildAnd(builder, output_smask, out_smask_idx, "");
         LLVMValueRef cmp = LLVMBuildICmp(builder, LLVMIntNE, smask_bit, lp_build_const_int_vec(gallivm, int_type, 0), "");
         smask_bit = LLVMBuildSExt(builder, cmp, int_vec_type, "");

         s_mask = LLVMBuildAnd(builder, s_mask, smask_bit, "");
      }
   }

   depth_ptr = depth_base_ptr;
   if (key->multisample) {
      LLVMValueRef sample_offset = LLVMBuildMul(builder, sample_loop_state.counter, depth_sample_stride, "");
      depth_ptr = LLVMBuildGEP(builder, depth_ptr, &sample_offset, 1, "");
   }

   /* Late Z test */
   if (depth_mode & LATE_DEPTH_TEST) {
      int pos0 = find_output_by_semantic(&shader->info.base,
                                         TGSI_SEMANTIC_POSITION,
                                         0);
      int s_out = find_output_by_semantic(&shader->info.base,
                                          TGSI_SEMANTIC_STENCIL,
                                          0);
      if (pos0 != -1 && outputs[pos0][2]) {
         z = LLVMBuildLoad(builder, outputs[pos0][2], "output.z");
      } else {
         if (key->multisample) {
            lp_build_interp_soa_update_pos_dyn(interp, gallivm, loop_state.counter, key->multisample ? sample_loop_state.counter : NULL);
            z = interp->pos[2];
         }
      }
      /*
       * Clamp according to ARB_depth_clamp semantics.
       */
      if (key->depth_clamp) {
         z = lp_build_depth_clamp(gallivm, builder, type, context_ptr,
                                  thread_data_ptr, z);
      }

      if (s_out != -1 && outputs[s_out][1]) {
         /* there's only one value, and spec says to discard additional bits */
         LLVMValueRef s_max_mask = lp_build_const_int_vec(gallivm, int_type, 255);
         stencil_refs[0] = LLVMBuildLoad(builder, outputs[s_out][1], "output.s");
         stencil_refs[0] = LLVMBuildBitCast(builder, stencil_refs[0], int_vec_type, "");
         stencil_refs[0] = LLVMBuildAnd(builder, stencil_refs[0], s_max_mask, "");
         stencil_refs[1] = stencil_refs[0];
      }

      lp_build_depth_stencil_load_swizzled(gallivm, type,
                                           zs_format_desc, key->resource_1d,
                                           depth_ptr, depth_stride,
                                           &z_fb, &s_fb, loop_state.counter);

      lp_build_depth_stencil_test(gallivm,
                                  &key->depth,
                                  key->stencil,
                                  type,
                                  zs_format_desc,
                                  key->multisample ? NULL : &mask,
                                  &s_mask,
                                  stencil_refs,
                                  z, z_fb, s_fb,
                                  facing,
                                  &z_value, &s_value,
                                  !simple_shader);
      /* Late Z write */
      if (depth_mode & LATE_DEPTH_WRITE) {
         lp_build_depth_stencil_write_swizzled(gallivm, type,
                                               zs_format_desc, key->resource_1d,
                                               NULL, NULL, NULL, loop_state.counter,
                                               depth_ptr, depth_stride,
                                               z_value, s_value);
      }
   }
   else if ((depth_mode & EARLY_DEPTH_TEST) &&
            (depth_mode & LATE_DEPTH_WRITE))
   {
      /* Need to apply a reduced mask to the depth write.  Reload the
       * depth value, update from zs_value with the new mask value and
       * write that out.
       */
      if (key->multisample) {
         z_value = LLVMBuildBitCast(builder, lp_build_pointer_get(builder, z_sample_value_store, sample_loop_state.counter), z_type, "");
         s_value = lp_build_pointer_get(builder, s_sample_value_store, sample_loop_state.counter);
         z_fb = LLVMBuildBitCast(builder, lp_build_pointer_get(builder, z_fb_store, sample_loop_state.counter), z_fb_type, "");
         s_fb = lp_build_pointer_get(builder, s_fb_store, sample_loop_state.counter);
      }
      lp_build_depth_stencil_write_swizzled(gallivm, type,
                                            zs_format_desc, key->resource_1d,
                                            key->multisample ? s_mask : lp_build_mask_value(&mask), z_fb, s_fb, loop_state.counter,
                                            depth_ptr, depth_stride,
                                            z_value, s_value);
   }
1196 
1197    if (key->occlusion_count) {
1198       LLVMValueRef counter = lp_jit_thread_data_counter(gallivm, thread_data_ptr);
1199       lp_build_name(counter, "counter");
1200 
1201       lp_build_occlusion_count(gallivm, type,
1202                                key->multisample ? s_mask : lp_build_mask_value(&mask), counter);
1203    }
1204 
1205    if (key->multisample) {
1206       /* store the sample mask for this loop */
1207       LLVMBuildStore(builder, s_mask, s_mask_ptr);
1208       lp_build_for_loop_end(&sample_loop_state);
1209    }
1210 
1211    mask_val = lp_build_mask_end(&mask);
1212    if (!key->multisample)
1213       LLVMBuildStore(builder, mask_val, mask_ptr);
1214    lp_build_for_loop_end(&loop_state);
1215 }
1216 
1217 
1218 /**
1219  * This function will reorder pixels from the fragment shader SoA to memory layout AoS
1220  *
1221  * Fragment Shader outputs pixels in small 2x2 blocks
1222  *  e.g. (0, 0), (1, 0), (0, 1), (1, 1) ; (2, 0) ...
1223  *
1224  * However in memory pixels are stored in rows
1225  *  e.g. (0, 0), (1, 0), (2, 0), (3, 0) ; (0, 1) ...
1226  *
1227  * @param type            fragment shader type (4x or 8x float)
1228  * @param num_fs          number of fs_src
1229  * @param is_1d           whether we're outputting to a 1d resource
1230  * @param dst_channels    number of output channels
1231  * @param fs_src          output from fragment shader
1232  * @param dst             pointer to store result
1233  * @param pad_inline      is channel padding inline or at end of row
1234  * @return                the number of dsts
1235  */
1236 static int
generate_fs_twiddle(struct gallivm_state * gallivm,struct lp_type type,unsigned num_fs,unsigned dst_channels,LLVMValueRef fs_src[][4],LLVMValueRef * dst,bool pad_inline)1237 generate_fs_twiddle(struct gallivm_state *gallivm,
1238                     struct lp_type type,
1239                     unsigned num_fs,
1240                     unsigned dst_channels,
1241                     LLVMValueRef fs_src[][4],
1242                     LLVMValueRef* dst,
1243                     bool pad_inline)
1244 {
1245    LLVMValueRef src[16];
1246 
1247    bool swizzle_pad;
1248    bool twiddle;
1249    bool split;
1250 
1251    unsigned pixels = type.length / 4;
1252    unsigned reorder_group;
1253    unsigned src_channels;
1254    unsigned src_count;
1255    unsigned i;
1256 
1257    src_channels = dst_channels < 3 ? dst_channels : 4;
1258    src_count = num_fs * src_channels;
1259 
1260    assert(pixels == 2 || pixels == 1);
1261    assert(num_fs * src_channels <= ARRAY_SIZE(src));
1262 
1263    /*
1264     * Transpose from SoA -> AoS
1265     */
1266    for (i = 0; i < num_fs; ++i) {
1267       lp_build_transpose_aos_n(gallivm, type, &fs_src[i][0], src_channels, &src[i * src_channels]);
1268    }
1269 
1270    /*
1271     * Pick transformation options
1272     */
1273    swizzle_pad = false;
1274    twiddle = false;
1275    split = false;
1276    reorder_group = 0;
1277 
1278    if (dst_channels == 1) {
1279       twiddle = true;
1280 
1281       if (pixels == 2) {
1282          split = true;
1283       }
1284    } else if (dst_channels == 2) {
1285       if (pixels == 1) {
1286          reorder_group = 1;
1287       }
1288    } else if (dst_channels > 2) {
1289       if (pixels == 1) {
1290          reorder_group = 2;
1291       } else {
1292          twiddle = true;
1293       }
1294 
1295       if (!pad_inline && dst_channels == 3 && pixels > 1) {
1296          swizzle_pad = true;
1297       }
1298    }
1299 
1300    /*
1301     * Split the src in half
1302     */
1303    if (split) {
1304       for (i = num_fs; i > 0; --i) {
1305          src[(i - 1)*2 + 1] = lp_build_extract_range(gallivm, src[i - 1], 4, 4);
1306          src[(i - 1)*2 + 0] = lp_build_extract_range(gallivm, src[i - 1], 0, 4);
1307       }
1308 
1309       src_count *= 2;
1310       type.length = 4;
1311    }
1312 
1313    /*
1314     * Ensure pixels are in memory order
1315     */
1316    if (reorder_group) {
1317       /* Twiddle pixels by reordering the array, e.g.:
1318        *
1319        * src_count =  8 -> 0 2 1 3 4 6 5 7
1320        * src_count = 16 -> 0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15
1321        */
1322       const unsigned reorder_sw[] = { 0, 2, 1, 3 };
1323 
1324       for (i = 0; i < src_count; ++i) {
1325          unsigned group = i / reorder_group;
1326          unsigned block = (group / 4) * 4 * reorder_group;
1327          unsigned j = block + (reorder_sw[group % 4] * reorder_group) + (i % reorder_group);
1328          dst[i] = src[j];
1329       }
1330    } else if (twiddle) {
1331       /* Twiddle pixels across elements of array */
1332       /*
1333        * XXX: we should avoid this in some cases, but would need to tell
1334        * lp_build_conv to reorder (or deal with it ourselves).
1335        */
1336       lp_bld_quad_twiddle(gallivm, type, src, src_count, dst);
1337    } else {
1338       /* Do nothing */
1339       memcpy(dst, src, sizeof(LLVMValueRef) * src_count);
1340    }
1341 
1342    /*
1343     * Moves any padding between pixels to the end
1344     * e.g. RGBXRGBX -> RGBRGBXX
1345     */
1346    if (swizzle_pad) {
1347       unsigned char swizzles[16];
1348       unsigned elems = pixels * dst_channels;
1349 
1350       for (i = 0; i < type.length; ++i) {
1351          if (i < elems)
1352             swizzles[i] = i % dst_channels + (i / dst_channels) * 4;
1353          else
1354             swizzles[i] = LP_BLD_SWIZZLE_DONTCARE;
1355       }
1356 
1357       for (i = 0; i < src_count; ++i) {
1358          dst[i] = lp_build_swizzle_aos_n(gallivm, dst[i], swizzles, type.length, type.length);
1359       }
1360    }
1361 
1362    return src_count;
1363 }
1364 
1365 
1366 /*
1367  * Untwiddle and transpose, much like the above.
1368  * However, this is after conversion, so we get packed vectors.
1369  * At this time only handle 4x16i8 rgba / 2x16i8 rg / 1x16i8 r data,
1370  * the vectors will look like:
1371  * r0r1r4r5r2r3r6r7r8r9r12... (albeit color channels may
1372  * be swizzled here). Extending to 16bit should be trivial.
1373  * Should also be extended to handle twice wide vectors with AVX2...
1374  */
1375 static void
fs_twiddle_transpose(struct gallivm_state * gallivm,struct lp_type type,LLVMValueRef * src,unsigned src_count,LLVMValueRef * dst)1376 fs_twiddle_transpose(struct gallivm_state *gallivm,
1377                      struct lp_type type,
1378                      LLVMValueRef *src,
1379                      unsigned src_count,
1380                      LLVMValueRef *dst)
1381 {
1382    unsigned i, j;
1383    struct lp_type type64, type16, type32;
1384    LLVMTypeRef type64_t, type8_t, type16_t, type32_t;
1385    LLVMBuilderRef builder = gallivm->builder;
1386    LLVMValueRef tmp[4], shuf[8];
1387    for (j = 0; j < 2; j++) {
1388       shuf[j*4 + 0] = lp_build_const_int32(gallivm, j*4 + 0);
1389       shuf[j*4 + 1] = lp_build_const_int32(gallivm, j*4 + 2);
1390       shuf[j*4 + 2] = lp_build_const_int32(gallivm, j*4 + 1);
1391       shuf[j*4 + 3] = lp_build_const_int32(gallivm, j*4 + 3);
1392    }
1393 
1394    assert(src_count == 4 || src_count == 2 || src_count == 1);
1395    assert(type.width == 8);
1396    assert(type.length == 16);
1397 
1398    type8_t = lp_build_vec_type(gallivm, type);
1399 
1400    type64 = type;
1401    type64.length /= 8;
1402    type64.width *= 8;
1403    type64_t = lp_build_vec_type(gallivm, type64);
1404 
1405    type16 = type;
1406    type16.length /= 2;
1407    type16.width *= 2;
1408    type16_t = lp_build_vec_type(gallivm, type16);
1409 
1410    type32 = type;
1411    type32.length /= 4;
1412    type32.width *= 4;
1413    type32_t = lp_build_vec_type(gallivm, type32);
1414 
1415    lp_build_transpose_aos_n(gallivm, type, src, src_count, tmp);
1416 
1417    if (src_count == 1) {
1418       /* transpose was no-op, just untwiddle */
1419       LLVMValueRef shuf_vec;
1420       shuf_vec = LLVMConstVector(shuf, 8);
1421       tmp[0] = LLVMBuildBitCast(builder, src[0], type16_t, "");
1422       tmp[0] = LLVMBuildShuffleVector(builder, tmp[0], tmp[0], shuf_vec, "");
1423       dst[0] = LLVMBuildBitCast(builder, tmp[0], type8_t, "");
1424    } else if (src_count == 2) {
1425       LLVMValueRef shuf_vec;
1426       shuf_vec = LLVMConstVector(shuf, 4);
1427 
1428       for (i = 0; i < 2; i++) {
1429          tmp[i] = LLVMBuildBitCast(builder, tmp[i], type32_t, "");
1430          tmp[i] = LLVMBuildShuffleVector(builder, tmp[i], tmp[i], shuf_vec, "");
1431          dst[i] = LLVMBuildBitCast(builder, tmp[i], type8_t, "");
1432       }
1433    } else {
1434       for (j = 0; j < 2; j++) {
1435          LLVMValueRef lo, hi, lo2, hi2;
1436           /*
1437           * Note that if we only really have 3 valid channels (rgb)
1438           * and we don't need alpha we could substitute a undef here
1439           * for the respective channel (causing llvm to drop conversion
1440           * for alpha).
1441           */
1442          /* we now have rgba0rgba1rgba4rgba5 etc, untwiddle */
1443          lo2 = LLVMBuildBitCast(builder, tmp[j*2], type64_t, "");
1444          hi2 = LLVMBuildBitCast(builder, tmp[j*2 + 1], type64_t, "");
1445          lo = lp_build_interleave2(gallivm, type64, lo2, hi2, 0);
1446          hi = lp_build_interleave2(gallivm, type64, lo2, hi2, 1);
1447          dst[j*2] = LLVMBuildBitCast(builder, lo, type8_t, "");
1448          dst[j*2 + 1] = LLVMBuildBitCast(builder, hi, type8_t, "");
1449       }
1450    }
1451 }
1452 
1453 
1454 /**
1455  * Load an unswizzled block of pixels from memory
1456  */
1457 static void
load_unswizzled_block(struct gallivm_state * gallivm,LLVMValueRef base_ptr,LLVMValueRef stride,unsigned block_width,unsigned block_height,LLVMValueRef * dst,struct lp_type dst_type,unsigned dst_count,unsigned dst_alignment,LLVMValueRef x_offset,LLVMValueRef y_offset,bool fb_fetch_twiddle)1458 load_unswizzled_block(struct gallivm_state *gallivm,
1459                       LLVMValueRef base_ptr,
1460                       LLVMValueRef stride,
1461                       unsigned block_width,
1462                       unsigned block_height,
1463                       LLVMValueRef* dst,
1464                       struct lp_type dst_type,
1465                       unsigned dst_count,
1466                       unsigned dst_alignment,
1467                       LLVMValueRef x_offset,
1468                       LLVMValueRef y_offset,
1469                       bool fb_fetch_twiddle)
1470 {
1471    LLVMBuilderRef builder = gallivm->builder;
1472    unsigned row_size = dst_count / block_height;
1473    unsigned i;
1474 
1475    /* Ensure block exactly fits into dst */
1476    assert((block_width * block_height) % dst_count == 0);
1477 
1478    for (i = 0; i < dst_count; ++i) {
1479       unsigned x = i % row_size;
1480       unsigned y = i / row_size;
1481 
1482       if (block_height == 2 && dst_count == 8 && fb_fetch_twiddle) {
1483          /* remap the raw slots into the fragment shader execution mode. */
1484          /* this math took me way too long to work out, I'm sure it's overkill. */
1485          x = (i & 1) + ((i >> 2) << 1);
1486          y = (i & 2) >> 1;
1487       }
1488 
1489       LLVMValueRef x_val;
1490       if (x_offset) {
1491          x_val = lp_build_const_int32(gallivm, x);
1492          if (x_offset)
1493             x_val = LLVMBuildAdd(builder, x_val, x_offset, "");
1494          x_val = LLVMBuildMul(builder, x_val, lp_build_const_int32(gallivm, (dst_type.width / 8) * dst_type.length), "");
1495       } else
1496          x_val = lp_build_const_int32(gallivm, x * (dst_type.width / 8) * dst_type.length);
1497 
1498       LLVMValueRef bx = x_val;
1499 
1500       LLVMValueRef y_val = lp_build_const_int32(gallivm, y);
1501       if (y_offset)
1502          y_val = LLVMBuildAdd(builder, y_val, y_offset, "");
1503       LLVMValueRef by = LLVMBuildMul(builder, y_val, stride, "");
1504 
1505       LLVMValueRef gep[2];
1506       LLVMValueRef dst_ptr;
1507 
1508       gep[0] = lp_build_const_int32(gallivm, 0);
1509       gep[1] = LLVMBuildAdd(builder, bx, by, "");
1510 
1511       dst_ptr = LLVMBuildGEP(builder, base_ptr, gep, 2, "");
1512       dst_ptr = LLVMBuildBitCast(builder, dst_ptr,
1513                                  LLVMPointerType(lp_build_vec_type(gallivm, dst_type), 0), "");
1514 
1515       dst[i] = LLVMBuildLoad(builder, dst_ptr, "");
1516 
1517       LLVMSetAlignment(dst[i], dst_alignment);
1518    }
1519 }
1520 
1521 
1522 /**
1523  * Store an unswizzled block of pixels to memory
1524  */
1525 static void
store_unswizzled_block(struct gallivm_state * gallivm,LLVMValueRef base_ptr,LLVMValueRef stride,unsigned block_width,unsigned block_height,LLVMValueRef * src,struct lp_type src_type,unsigned src_count,unsigned src_alignment)1526 store_unswizzled_block(struct gallivm_state *gallivm,
1527                        LLVMValueRef base_ptr,
1528                        LLVMValueRef stride,
1529                        unsigned block_width,
1530                        unsigned block_height,
1531                        LLVMValueRef* src,
1532                        struct lp_type src_type,
1533                        unsigned src_count,
1534                        unsigned src_alignment)
1535 {
1536    LLVMBuilderRef builder = gallivm->builder;
1537    unsigned row_size = src_count / block_height;
1538    unsigned i;
1539 
1540    /* Ensure src exactly fits into block */
1541    assert((block_width * block_height) % src_count == 0);
1542 
1543    for (i = 0; i < src_count; ++i) {
1544       unsigned x = i % row_size;
1545       unsigned y = i / row_size;
1546 
1547       LLVMValueRef bx = lp_build_const_int32(gallivm, x * (src_type.width / 8) * src_type.length);
1548       LLVMValueRef by = LLVMBuildMul(builder, lp_build_const_int32(gallivm, y), stride, "");
1549 
1550       LLVMValueRef gep[2];
1551       LLVMValueRef src_ptr;
1552 
1553       gep[0] = lp_build_const_int32(gallivm, 0);
1554       gep[1] = LLVMBuildAdd(builder, bx, by, "");
1555 
1556       src_ptr = LLVMBuildGEP(builder, base_ptr, gep, 2, "");
1557       src_ptr = LLVMBuildBitCast(builder, src_ptr,
1558                                  LLVMPointerType(lp_build_vec_type(gallivm, src_type), 0), "");
1559 
1560       src_ptr = LLVMBuildStore(builder, src[i], src_ptr);
1561 
1562       LLVMSetAlignment(src_ptr, src_alignment);
1563    }
1564 }
1565 
1566 
1567 
1568 /**
1569  * Retrieves the type for a format which is usable in the blending code.
1570  *
1571  * e.g. RGBA16F = 4x float, R3G3B2 = 3x byte
1572  */
1573 static inline void
lp_blend_type_from_format_desc(const struct util_format_description * format_desc,struct lp_type * type)1574 lp_blend_type_from_format_desc(const struct util_format_description *format_desc,
1575                                struct lp_type* type)
1576 {
1577    unsigned i;
1578    unsigned chan;
1579 
1580    if (format_expands_to_float_soa(format_desc)) {
1581       /* always use ordinary floats for blending */
1582       type->floating = true;
1583       type->fixed = false;
1584       type->sign = true;
1585       type->norm = false;
1586       type->width = 32;
1587       type->length = 4;
1588       return;
1589    }
1590 
1591    for (i = 0; i < 4; i++)
1592       if (format_desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
1593          break;
1594    chan = i;
1595 
1596    memset(type, 0, sizeof(struct lp_type));
1597    type->floating = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FLOAT;
1598    type->fixed    = format_desc->channel[chan].type == UTIL_FORMAT_TYPE_FIXED;
1599    type->sign     = format_desc->channel[chan].type != UTIL_FORMAT_TYPE_UNSIGNED;
1600    type->norm     = format_desc->channel[chan].normalized;
1601    type->width    = format_desc->channel[chan].size;
1602    type->length   = format_desc->nr_channels;
1603 
1604    for (i = 1; i < format_desc->nr_channels; ++i) {
1605       if (format_desc->channel[i].size > type->width)
1606          type->width = format_desc->channel[i].size;
1607    }
1608 
1609    if (type->floating) {
1610       type->width = 32;
1611    } else {
1612       if (type->width <= 8) {
1613          type->width = 8;
1614       } else if (type->width <= 16) {
1615          type->width = 16;
1616       } else {
1617          type->width = 32;
1618       }
1619    }
1620 
1621    if (is_arithmetic_format(format_desc) && type->length == 3) {
1622       type->length = 4;
1623    }
1624 }
1625 
1626 
1627 /**
1628  * Scale a normalized value from src_bits to dst_bits.
1629  *
1630  * The exact calculation is
1631  *
1632  *    dst = iround(src * dst_mask / src_mask)
1633  *
1634  *  or with integer rounding
1635  *
1636  *    dst = src * (2*dst_mask + sign(src)*src_mask) / (2*src_mask)
1637  *
1638  *  where
1639  *
1640  *    src_mask = (1 << src_bits) - 1
1641  *    dst_mask = (1 << dst_bits) - 1
1642  *
1643  * but we try to avoid division and multiplication through shifts.
1644  */
1645 static inline LLVMValueRef
scale_bits(struct gallivm_state * gallivm,int src_bits,int dst_bits,LLVMValueRef src,struct lp_type src_type)1646 scale_bits(struct gallivm_state *gallivm,
1647            int src_bits,
1648            int dst_bits,
1649            LLVMValueRef src,
1650            struct lp_type src_type)
1651 {
1652    LLVMBuilderRef builder = gallivm->builder;
1653    LLVMValueRef result = src;
1654 
1655    if (dst_bits < src_bits) {
1656       int delta_bits = src_bits - dst_bits;
1657 
1658       if (delta_bits <= dst_bits) {
1659          /*
1660           * Approximate the rescaling with a single shift.
1661           *
1662           * This gives the wrong rounding.
1663           */
1664 
1665          result = LLVMBuildLShr(builder,
1666                                 src,
1667                                 lp_build_const_int_vec(gallivm, src_type, delta_bits),
1668                                 "");
1669 
1670       } else {
1671          /*
1672           * Try more accurate rescaling.
1673           */
1674 
1675          /*
1676           * Drop the least significant bits to make space for the multiplication.
1677           *
1678           * XXX: A better approach would be to use a wider integer type as intermediate.  But
1679           * this is enough to convert alpha from 16bits -> 2 when rendering to
1680           * PIPE_FORMAT_R10G10B10A2_UNORM.
1681           */
1682          result = LLVMBuildLShr(builder,
1683                                 src,
1684                                 lp_build_const_int_vec(gallivm, src_type, dst_bits),
1685                                 "");
1686 
1687 
1688          result = LLVMBuildMul(builder,
1689                                result,
1690                                lp_build_const_int_vec(gallivm, src_type, (1LL << dst_bits) - 1),
1691                                "");
1692 
1693          /*
1694           * Add a rounding term before the division.
1695           *
1696           * TODO: Handle signed integers too.
1697           */
1698          if (!src_type.sign) {
1699             result = LLVMBuildAdd(builder,
1700                                   result,
1701                                   lp_build_const_int_vec(gallivm, src_type, (1LL << (delta_bits - 1))),
1702                                   "");
1703          }
1704 
1705          /*
1706           * Approximate the division by src_mask with a src_bits shift.
1707           *
1708           * Given the src has already been shifted by dst_bits, all we need
1709           * to do is to shift by the difference.
1710           */
1711 
1712          result = LLVMBuildLShr(builder,
1713                                 result,
1714                                 lp_build_const_int_vec(gallivm, src_type, delta_bits),
1715                                 "");
1716       }
1717 
1718    } else if (dst_bits > src_bits) {
1719       /* Scale up bits */
1720       int db = dst_bits - src_bits;
1721 
1722       /* Shift left by difference in bits */
1723       result = LLVMBuildShl(builder,
1724                             src,
1725                             lp_build_const_int_vec(gallivm, src_type, db),
1726                             "");
1727 
1728       if (db <= src_bits) {
1729          /* Enough bits in src to fill the remainder */
1730          LLVMValueRef lower = LLVMBuildLShr(builder,
1731                                             src,
1732                                             lp_build_const_int_vec(gallivm, src_type, src_bits - db),
1733                                             "");
1734 
1735          result = LLVMBuildOr(builder, result, lower, "");
1736       } else if (db > src_bits) {
1737          /* Need to repeatedly copy src bits to fill remainder in dst */
1738          unsigned n;
1739 
1740          for (n = src_bits; n < dst_bits; n *= 2) {
1741             LLVMValueRef shuv = lp_build_const_int_vec(gallivm, src_type, n);
1742 
1743             result = LLVMBuildOr(builder,
1744                                  result,
1745                                  LLVMBuildLShr(builder, result, shuv, ""),
1746                                  "");
1747          }
1748       }
1749    }
1750 
1751    return result;
1752 }
1753 
1754 /**
1755  * If RT is a smallfloat (needing denorms) format
1756  */
1757 static inline int
have_smallfloat_format(struct lp_type dst_type,enum pipe_format format)1758 have_smallfloat_format(struct lp_type dst_type,
1759                        enum pipe_format format)
1760 {
1761    return ((dst_type.floating && dst_type.width != 32) ||
1762     /* due to format handling hacks this format doesn't have floating set
1763      * here (and actually has width set to 32 too) so special case this. */
1764     (format == PIPE_FORMAT_R11G11B10_FLOAT));
1765 }
1766 
1767 
1768 /**
1769  * Convert from memory format to blending format
1770  *
1771  * e.g. GL_R3G3B2 is 1 byte in memory but 3 bytes for blending
1772  */
1773 static void
convert_to_blend_type(struct gallivm_state * gallivm,unsigned block_size,const struct util_format_description * src_fmt,struct lp_type src_type,struct lp_type dst_type,LLVMValueRef * src,unsigned num_srcs)1774 convert_to_blend_type(struct gallivm_state *gallivm,
1775                       unsigned block_size,
1776                       const struct util_format_description *src_fmt,
1777                       struct lp_type src_type,
1778                       struct lp_type dst_type,
1779                       LLVMValueRef* src, // and dst
1780                       unsigned num_srcs)
1781 {
1782    LLVMValueRef *dst = src;
1783    LLVMBuilderRef builder = gallivm->builder;
1784    struct lp_type blend_type;
1785    struct lp_type mem_type;
1786    unsigned i, j;
1787    unsigned pixels = block_size / num_srcs;
1788    bool is_arith;
1789 
1790    /*
1791     * full custom path for packed floats and srgb formats - none of the later
1792     * functions would do anything useful, and given the lp_type representation they
1793     * can't be fixed. Should really have some SoA blend path for these kind of
1794     * formats rather than hacking them in here.
1795     */
1796    if (format_expands_to_float_soa(src_fmt)) {
1797       LLVMValueRef tmpsrc[4];
1798       /*
1799        * This is pretty suboptimal for this case blending in SoA would be much
1800        * better, since conversion gets us SoA values so need to convert back.
1801        */
1802       assert(src_type.width == 32 || src_type.width == 16);
1803       assert(dst_type.floating);
1804       assert(dst_type.width == 32);
1805       assert(dst_type.length % 4 == 0);
1806       assert(num_srcs % 4 == 0);
1807 
1808       if (src_type.width == 16) {
1809          /* expand 4x16bit values to 4x32bit */
1810          struct lp_type type32x4 = src_type;
1811          LLVMTypeRef ltype32x4;
1812          unsigned num_fetch = dst_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
1813          type32x4.width = 32;
1814          ltype32x4 = lp_build_vec_type(gallivm, type32x4);
1815          for (i = 0; i < num_fetch; i++) {
1816             src[i] = LLVMBuildZExt(builder, src[i], ltype32x4, "");
1817          }
1818          src_type.width = 32;
1819       }
1820       for (i = 0; i < 4; i++) {
1821          tmpsrc[i] = src[i];
1822       }
1823       for (i = 0; i < num_srcs / 4; i++) {
1824          LLVMValueRef tmpsoa[4];
1825          LLVMValueRef tmps = tmpsrc[i];
1826          if (dst_type.length == 8) {
1827             LLVMValueRef shuffles[8];
1828             unsigned j;
1829             /* fetch was 4 values but need 8-wide output values */
1830             tmps = lp_build_concat(gallivm, &tmpsrc[i * 2], src_type, 2);
1831             /*
1832              * for 8-wide aos transpose would give us wrong order not matching
1833              * incoming converted fs values and mask. ARGH.
1834              */
1835             for (j = 0; j < 4; j++) {
1836                shuffles[j] = lp_build_const_int32(gallivm, j * 2);
1837                shuffles[j + 4] = lp_build_const_int32(gallivm, j * 2 + 1);
1838             }
1839             tmps = LLVMBuildShuffleVector(builder, tmps, tmps,
1840                                           LLVMConstVector(shuffles, 8), "");
1841          }
1842          if (src_fmt->format == PIPE_FORMAT_R11G11B10_FLOAT) {
1843             lp_build_r11g11b10_to_float(gallivm, tmps, tmpsoa);
1844          }
1845          else {
1846             lp_build_unpack_rgba_soa(gallivm, src_fmt, dst_type, tmps, tmpsoa);
1847          }
1848          lp_build_transpose_aos(gallivm, dst_type, tmpsoa, &src[i * 4]);
1849       }
1850       return;
1851    }
1852 
1853    lp_mem_type_from_format_desc(src_fmt, &mem_type);
1854    lp_blend_type_from_format_desc(src_fmt, &blend_type);
1855 
1856    /* Is the format arithmetic */
1857    is_arith = blend_type.length * blend_type.width != mem_type.width * mem_type.length;
1858    is_arith &= !(mem_type.width == 16 && mem_type.floating);
1859 
1860    /* Pad if necessary */
1861    if (!is_arith && src_type.length < dst_type.length) {
1862       for (i = 0; i < num_srcs; ++i) {
1863          dst[i] = lp_build_pad_vector(gallivm, src[i], dst_type.length);
1864       }
1865 
1866       src_type.length = dst_type.length;
1867    }
1868 
1869    /* Special case for half-floats */
1870    if (mem_type.width == 16 && mem_type.floating) {
1871       assert(blend_type.width == 32 && blend_type.floating);
1872       lp_build_conv_auto(gallivm, src_type, &dst_type, dst, num_srcs, dst);
1873       is_arith = false;
1874    }
1875 
1876    if (!is_arith) {
1877       return;
1878    }
1879 
1880    src_type.width = blend_type.width * blend_type.length;
1881    blend_type.length *= pixels;
1882    src_type.length *= pixels / (src_type.length / mem_type.length);
1883 
1884    for (i = 0; i < num_srcs; ++i) {
1885       LLVMValueRef chans[4];
1886       LLVMValueRef res = NULL;
1887 
1888       dst[i] = LLVMBuildZExt(builder, src[i], lp_build_vec_type(gallivm, src_type), "");
1889 
1890       for (j = 0; j < src_fmt->nr_channels; ++j) {
1891          unsigned mask = 0;
1892          unsigned sa = src_fmt->channel[j].shift;
1893 #if UTIL_ARCH_LITTLE_ENDIAN
1894          unsigned from_lsb = j;
1895 #else
1896          unsigned from_lsb = src_fmt->nr_channels - j - 1;
1897 #endif
1898 
1899          mask = (1 << src_fmt->channel[j].size) - 1;
1900 
1901          /* Extract bits from source */
1902          chans[j] = LLVMBuildLShr(builder,
1903                                   dst[i],
1904                                   lp_build_const_int_vec(gallivm, src_type, sa),
1905                                   "");
1906 
1907          chans[j] = LLVMBuildAnd(builder,
1908                                  chans[j],
1909                                  lp_build_const_int_vec(gallivm, src_type, mask),
1910                                  "");
1911 
1912          /* Scale bits */
1913          if (src_type.norm) {
1914             chans[j] = scale_bits(gallivm, src_fmt->channel[j].size,
1915                                   blend_type.width, chans[j], src_type);
1916          }
1917 
1918          /* Insert bits into correct position */
1919          chans[j] = LLVMBuildShl(builder,
1920                                  chans[j],
1921                                  lp_build_const_int_vec(gallivm, src_type, from_lsb * blend_type.width),
1922                                  "");
1923 
1924          if (j == 0) {
1925             res = chans[j];
1926          } else {
1927             res = LLVMBuildOr(builder, res, chans[j], "");
1928          }
1929       }
1930 
1931       dst[i] = LLVMBuildBitCast(builder, res, lp_build_vec_type(gallivm, blend_type), "");
1932    }
1933 }
1934 
1935 
1936 /**
1937  * Convert from blending format to memory format
1938  *
1939  * e.g. GL_R3G3B2 is 3 bytes for blending but 1 byte in memory
1940  */
1941 static void
convert_from_blend_type(struct gallivm_state * gallivm,unsigned block_size,const struct util_format_description * src_fmt,struct lp_type src_type,struct lp_type dst_type,LLVMValueRef * src,unsigned num_srcs)1942 convert_from_blend_type(struct gallivm_state *gallivm,
1943                         unsigned block_size,
1944                         const struct util_format_description *src_fmt,
1945                         struct lp_type src_type,
1946                         struct lp_type dst_type,
1947                         LLVMValueRef* src, // and dst
1948                         unsigned num_srcs)
1949 {
1950    LLVMValueRef* dst = src;
1951    unsigned i, j, k;
1952    struct lp_type mem_type;
1953    struct lp_type blend_type;
1954    LLVMBuilderRef builder = gallivm->builder;
1955    unsigned pixels = block_size / num_srcs;
1956    bool is_arith;
1957 
1958    /*
1959     * full custom path for packed floats and srgb formats - none of the later
1960     * functions would do anything useful, and given the lp_type representation they
1961     * can't be fixed. Should really have some SoA blend path for these kind of
1962     * formats rather than hacking them in here.
1963     */
1964    if (format_expands_to_float_soa(src_fmt)) {
1965       /*
1966        * This is pretty suboptimal for this case blending in SoA would be much
1967        * better - we need to transpose the AoS values back to SoA values for
1968        * conversion/packing.
1969        */
1970       assert(src_type.floating);
1971       assert(src_type.width == 32);
1972       assert(src_type.length % 4 == 0);
1973       assert(dst_type.width == 32 || dst_type.width == 16);
1974 
1975       for (i = 0; i < num_srcs / 4; i++) {
1976          LLVMValueRef tmpsoa[4], tmpdst;
1977          lp_build_transpose_aos(gallivm, src_type, &src[i * 4], tmpsoa);
1978          /* really really need SoA here */
1979 
1980          if (src_fmt->format == PIPE_FORMAT_R11G11B10_FLOAT) {
1981             tmpdst = lp_build_float_to_r11g11b10(gallivm, tmpsoa);
1982          }
1983          else {
1984             tmpdst = lp_build_float_to_srgb_packed(gallivm, src_fmt,
1985                                                    src_type, tmpsoa);
1986          }
1987 
1988          if (src_type.length == 8) {
1989             LLVMValueRef tmpaos, shuffles[8];
1990             unsigned j;
1991             /*
1992              * for 8-wide aos transpose has given us wrong order not matching
1993              * output order. HMPF. Also need to split the output values manually.
1994              */
1995             for (j = 0; j < 4; j++) {
1996                shuffles[j * 2] = lp_build_const_int32(gallivm, j);
1997                shuffles[j * 2 + 1] = lp_build_const_int32(gallivm, j + 4);
1998             }
1999             tmpaos = LLVMBuildShuffleVector(builder, tmpdst, tmpdst,
2000                                             LLVMConstVector(shuffles, 8), "");
2001             src[i * 2] = lp_build_extract_range(gallivm, tmpaos, 0, 4);
2002             src[i * 2 + 1] = lp_build_extract_range(gallivm, tmpaos, 4, 4);
2003          }
2004          else {
2005             src[i] = tmpdst;
2006          }
2007       }
2008       if (dst_type.width == 16) {
2009          struct lp_type type16x8 = dst_type;
2010          struct lp_type type32x4 = dst_type;
2011          LLVMTypeRef ltype16x4, ltypei64, ltypei128;
2012          unsigned num_fetch = src_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
2013          type16x8.length = 8;
2014          type32x4.width = 32;
2015          ltypei128 = LLVMIntTypeInContext(gallivm->context, 128);
2016          ltypei64 = LLVMIntTypeInContext(gallivm->context, 64);
2017          ltype16x4 = lp_build_vec_type(gallivm, dst_type);
2018          /* We could do vector truncation but it doesn't generate very good code */
2019          for (i = 0; i < num_fetch; i++) {
2020             src[i] = lp_build_pack2(gallivm, type32x4, type16x8,
2021                                     src[i], lp_build_zero(gallivm, type32x4));
2022             src[i] = LLVMBuildBitCast(builder, src[i], ltypei128, "");
2023             src[i] = LLVMBuildTrunc(builder, src[i], ltypei64, "");
2024             src[i] = LLVMBuildBitCast(builder, src[i], ltype16x4, "");
2025          }
2026       }
2027       return;
2028    }
2029 
2030    lp_mem_type_from_format_desc(src_fmt, &mem_type);
2031    lp_blend_type_from_format_desc(src_fmt, &blend_type);
2032 
2033    is_arith = (blend_type.length * blend_type.width != mem_type.width * mem_type.length);
2034 
2035    /* Special case for half-floats */
2036    if (mem_type.width == 16 && mem_type.floating) {
2037       int length = dst_type.length;
2038       assert(blend_type.width == 32 && blend_type.floating);
2039 
2040       dst_type.length = src_type.length;
2041 
2042       lp_build_conv_auto(gallivm, src_type, &dst_type, dst, num_srcs, dst);
2043 
2044       dst_type.length = length;
2045       is_arith = false;
2046    }
2047 
2048    /* Remove any padding */
2049    if (!is_arith && (src_type.length % mem_type.length)) {
2050       src_type.length -= (src_type.length % mem_type.length);
2051 
2052       for (i = 0; i < num_srcs; ++i) {
2053          dst[i] = lp_build_extract_range(gallivm, dst[i], 0, src_type.length);
2054       }
2055    }
2056 
2057    /* No bit arithmetic to do */
2058    if (!is_arith) {
2059       return;
2060    }
2061 
2062    src_type.length = pixels;
2063    src_type.width = blend_type.length * blend_type.width;
2064    dst_type.length = pixels;
2065 
2066    for (i = 0; i < num_srcs; ++i) {
2067       LLVMValueRef chans[4];
2068       LLVMValueRef res = NULL;
2069 
2070       dst[i] = LLVMBuildBitCast(builder, src[i], lp_build_vec_type(gallivm, src_type), "");
2071 
2072       for (j = 0; j < src_fmt->nr_channels; ++j) {
2073          unsigned mask = 0;
2074          unsigned sa = src_fmt->channel[j].shift;
2075          unsigned sz_a = src_fmt->channel[j].size;
2076 #if UTIL_ARCH_LITTLE_ENDIAN
2077          unsigned from_lsb = j;
2078 #else
2079          unsigned from_lsb = src_fmt->nr_channels - j - 1;
2080 #endif
2081 
2082          assert(blend_type.width > src_fmt->channel[j].size);
2083 
2084          for (k = 0; k < blend_type.width; ++k) {
2085             mask |= 1 << k;
2086          }
2087 
2088          /* Extract bits */
2089          chans[j] = LLVMBuildLShr(builder,
2090                                   dst[i],
2091                                   lp_build_const_int_vec(gallivm, src_type,
2092                                                          from_lsb * blend_type.width),
2093                                   "");
2094 
2095          chans[j] = LLVMBuildAnd(builder,
2096                                  chans[j],
2097                                  lp_build_const_int_vec(gallivm, src_type, mask),
2098                                  "");
2099 
2100          /* Scale down bits */
2101          if (src_type.norm) {
2102             chans[j] = scale_bits(gallivm, blend_type.width,
2103                                   src_fmt->channel[j].size, chans[j], src_type);
2104          } else if (!src_type.floating && sz_a < blend_type.width) {
2105             LLVMValueRef mask_val = lp_build_const_int_vec(gallivm, src_type, (1UL << sz_a) - 1);
2106             LLVMValueRef mask = LLVMBuildICmp(builder, LLVMIntUGT, chans[j], mask_val, "");
2107             chans[j] = LLVMBuildSelect(builder, mask, mask_val, chans[j], "");
2108          }
2109 
2110          /* Insert bits */
2111          chans[j] = LLVMBuildShl(builder,
2112                                  chans[j],
2113                                  lp_build_const_int_vec(gallivm, src_type, sa),
2114                                  "");
2115 
2116          sa += src_fmt->channel[j].size;
2117 
2118          if (j == 0) {
2119             res = chans[j];
2120          } else {
2121             res = LLVMBuildOr(builder, res, chans[j], "");
2122          }
2123       }
2124 
2125       assert (dst_type.width != 24);
2126 
2127       dst[i] = LLVMBuildTrunc(builder, res, lp_build_vec_type(gallivm, dst_type), "");
2128    }
2129 }
2130 
2131 
2132 /**
2133  * Convert alpha to same blend type as src
2134  */
2135 static void
convert_alpha(struct gallivm_state * gallivm,struct lp_type row_type,struct lp_type alpha_type,const unsigned block_size,const unsigned block_height,const unsigned src_count,const unsigned dst_channels,const bool pad_inline,LLVMValueRef * src_alpha)2136 convert_alpha(struct gallivm_state *gallivm,
2137               struct lp_type row_type,
2138               struct lp_type alpha_type,
2139               const unsigned block_size,
2140               const unsigned block_height,
2141               const unsigned src_count,
2142               const unsigned dst_channels,
2143               const bool pad_inline,
2144               LLVMValueRef* src_alpha)
2145 {
2146    LLVMBuilderRef builder = gallivm->builder;
2147    unsigned i, j;
2148    unsigned length = row_type.length;
2149    row_type.length = alpha_type.length;
2150 
2151    /* Twiddle the alpha to match pixels */
2152    lp_bld_quad_twiddle(gallivm, alpha_type, src_alpha, block_height, src_alpha);
2153 
2154    /*
2155     * TODO this should use single lp_build_conv call for
2156     * src_count == 1 && dst_channels == 1 case (dropping the concat below)
2157     */
2158    for (i = 0; i < block_height; ++i) {
2159       lp_build_conv(gallivm, alpha_type, row_type, &src_alpha[i], 1, &src_alpha[i], 1);
2160    }
2161 
2162    alpha_type = row_type;
2163    row_type.length = length;
2164 
2165    /* If only one channel we can only need the single alpha value per pixel */
2166    if (src_count == 1 && dst_channels == 1) {
2167 
2168       lp_build_concat_n(gallivm, alpha_type, src_alpha, block_height, src_alpha, src_count);
2169    } else {
2170       /* If there are more srcs than rows then we need to split alpha up */
2171       if (src_count > block_height) {
2172          for (i = src_count; i > 0; --i) {
2173             unsigned pixels = block_size / src_count;
2174             unsigned idx = i - 1;
2175 
2176             src_alpha[idx] = lp_build_extract_range(gallivm, src_alpha[(idx * pixels) / 4],
2177                                                     (idx * pixels) % 4, pixels);
2178          }
2179       }
2180 
2181       /* If there is a src for each pixel broadcast the alpha across whole row */
2182       if (src_count == block_size) {
2183          for (i = 0; i < src_count; ++i) {
2184             src_alpha[i] = lp_build_broadcast(gallivm,
2185                               lp_build_vec_type(gallivm, row_type), src_alpha[i]);
2186          }
2187       } else {
2188          unsigned pixels = block_size / src_count;
2189          unsigned channels = pad_inline ? TGSI_NUM_CHANNELS : dst_channels;
2190          unsigned alpha_span = 1;
2191          LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];
2192 
2193          /* Check if we need 2 src_alphas for our shuffles */
2194          if (pixels > alpha_type.length) {
2195             alpha_span = 2;
2196          }
2197 
2198          /* Broadcast alpha across all channels, e.g. a1a2 to a1a1a1a1a2a2a2a2 */
2199          for (j = 0; j < row_type.length; ++j) {
2200             if (j < pixels * channels) {
2201                shuffles[j] = lp_build_const_int32(gallivm, j / channels);
2202             } else {
2203                shuffles[j] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
2204             }
2205          }
2206 
2207          for (i = 0; i < src_count; ++i) {
2208             unsigned idx1 = i, idx2 = i;
2209 
2210             if (alpha_span > 1){
2211                idx1 *= alpha_span;
2212                idx2 = idx1 + 1;
2213             }
2214 
2215             src_alpha[i] = LLVMBuildShuffleVector(builder,
2216                                                   src_alpha[idx1],
2217                                                   src_alpha[idx2],
2218                                                   LLVMConstVector(shuffles, row_type.length),
2219                                                   "");
2220          }
2221       }
2222    }
2223 }
2224 
2225 
2226 /**
2227  * Generates the blend function for unswizzled colour buffers
2228  * Also generates the read & write from colour buffer
2229  */
2230 static void
generate_unswizzled_blend(struct gallivm_state * gallivm,unsigned rt,struct lp_fragment_shader_variant * variant,enum pipe_format out_format,unsigned int num_fs,struct lp_type fs_type,LLVMValueRef * fs_mask,LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][4],LLVMValueRef context_ptr,LLVMValueRef color_ptr,LLVMValueRef stride,unsigned partial_mask,boolean do_branch)2231 generate_unswizzled_blend(struct gallivm_state *gallivm,
2232                           unsigned rt,
2233                           struct lp_fragment_shader_variant *variant,
2234                           enum pipe_format out_format,
2235                           unsigned int num_fs,
2236                           struct lp_type fs_type,
2237                           LLVMValueRef* fs_mask,
2238                           LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][4],
2239                           LLVMValueRef context_ptr,
2240                           LLVMValueRef color_ptr,
2241                           LLVMValueRef stride,
2242                           unsigned partial_mask,
2243                           boolean do_branch)
2244 {
2245    const unsigned alpha_channel = 3;
2246    const unsigned block_width = LP_RASTER_BLOCK_SIZE;
2247    const unsigned block_height = LP_RASTER_BLOCK_SIZE;
2248    const unsigned block_size = block_width * block_height;
2249    const unsigned lp_integer_vector_width = 128;
2250 
2251    LLVMBuilderRef builder = gallivm->builder;
2252    LLVMValueRef fs_src[4][TGSI_NUM_CHANNELS];
2253    LLVMValueRef fs_src1[4][TGSI_NUM_CHANNELS];
2254    LLVMValueRef src_alpha[4 * 4];
2255    LLVMValueRef src1_alpha[4 * 4] = { NULL };
2256    LLVMValueRef src_mask[4 * 4];
2257    LLVMValueRef src[4 * 4];
2258    LLVMValueRef src1[4 * 4];
2259    LLVMValueRef dst[4 * 4];
2260    LLVMValueRef blend_color;
2261    LLVMValueRef blend_alpha;
2262    LLVMValueRef i32_zero;
2263    LLVMValueRef check_mask;
2264    LLVMValueRef undef_src_val;
2265 
2266    struct lp_build_mask_context mask_ctx;
2267    struct lp_type mask_type;
2268    struct lp_type blend_type;
2269    struct lp_type row_type;
2270    struct lp_type dst_type;
2271    struct lp_type ls_type;
2272 
2273    unsigned char swizzle[TGSI_NUM_CHANNELS];
2274    unsigned vector_width;
2275    unsigned src_channels = TGSI_NUM_CHANNELS;
2276    unsigned dst_channels;
2277    unsigned dst_count;
2278    unsigned src_count;
2279    unsigned i, j;
2280 
2281    const struct util_format_description* out_format_desc = util_format_description(out_format);
2282 
2283    unsigned dst_alignment;
2284 
2285    bool pad_inline = is_arithmetic_format(out_format_desc);
2286    bool has_alpha = false;
2287    const boolean dual_source_blend = variant->key.blend.rt[0].blend_enable &&
2288                                      util_blend_state_is_dual(&variant->key.blend, 0);
2289 
2290    const boolean is_1d = variant->key.resource_1d;
2291    boolean twiddle_after_convert = FALSE;
2292    unsigned num_fullblock_fs = is_1d ? 2 * num_fs : num_fs;
2293    LLVMValueRef fpstate = 0;
2294 
2295    /* Get type from output format */
2296    lp_blend_type_from_format_desc(out_format_desc, &row_type);
2297    lp_mem_type_from_format_desc(out_format_desc, &dst_type);
2298 
2299    /*
2300     * Technically this code should go into lp_build_smallfloat_to_float
2301     * and lp_build_float_to_smallfloat but due to the
2302     * http://llvm.org/bugs/show_bug.cgi?id=6393
2303     * llvm reorders the mxcsr intrinsics in a way that breaks the code.
2304     * So the ordering is important here and there shouldn't be any
2305     * llvm ir instrunctions in this function before
2306     * this, otherwise half-float format conversions won't work
2307     * (again due to llvm bug #6393).
2308     */
2309    if (have_smallfloat_format(dst_type, out_format)) {
2310       /* We need to make sure that denorms are ok for half float
2311          conversions */
2312       fpstate = lp_build_fpstate_get(gallivm);
2313       lp_build_fpstate_set_denorms_zero(gallivm, FALSE);
2314    }
2315 
2316    mask_type = lp_int32_vec4_type();
2317    mask_type.length = fs_type.length;
2318 
2319    for (i = num_fs; i < num_fullblock_fs; i++) {
2320       fs_mask[i] = lp_build_zero(gallivm, mask_type);
2321    }
2322 
2323    /* Do not bother executing code when mask is empty.. */
2324    if (do_branch) {
2325       check_mask = LLVMConstNull(lp_build_int_vec_type(gallivm, mask_type));
2326 
2327       for (i = 0; i < num_fullblock_fs; ++i) {
2328          check_mask = LLVMBuildOr(builder, check_mask, fs_mask[i], "");
2329       }
2330 
2331       lp_build_mask_begin(&mask_ctx, gallivm, mask_type, check_mask);
2332       lp_build_mask_check(&mask_ctx);
2333    }
2334 
2335    partial_mask |= !variant->opaque;
2336    i32_zero = lp_build_const_int32(gallivm, 0);
2337 
2338    undef_src_val = lp_build_undef(gallivm, fs_type);
2339 
2340    row_type.length = fs_type.length;
2341    vector_width    = dst_type.floating ? lp_native_vector_width : lp_integer_vector_width;
2342 
2343    /* Compute correct swizzle and count channels */
2344    memset(swizzle, LP_BLD_SWIZZLE_DONTCARE, TGSI_NUM_CHANNELS);
2345    dst_channels = 0;
2346 
2347    for (i = 0; i < TGSI_NUM_CHANNELS; ++i) {
2348       /* Ensure channel is used */
2349       if (out_format_desc->swizzle[i] >= TGSI_NUM_CHANNELS) {
2350          continue;
2351       }
2352 
2353       /* Ensure not already written to (happens in case with GL_ALPHA) */
2354       if (swizzle[out_format_desc->swizzle[i]] < TGSI_NUM_CHANNELS) {
2355          continue;
2356       }
2357 
2358       /* Ensure we havn't already found all channels */
2359       if (dst_channels >= out_format_desc->nr_channels) {
2360          continue;
2361       }
2362 
2363       swizzle[out_format_desc->swizzle[i]] = i;
2364       ++dst_channels;
2365 
2366       if (i == alpha_channel) {
2367          has_alpha = true;
2368       }
2369    }
2370 
2371    if (format_expands_to_float_soa(out_format_desc)) {
2372       /*
2373        * the code above can't work for layout_other
2374        * for srgb it would sort of work but we short-circuit swizzles, etc.
2375        * as that is done as part of unpack / pack.
2376        */
2377       dst_channels = 4; /* HACK: this is fake 4 really but need it due to transpose stuff later */
2378       has_alpha = true;
2379       swizzle[0] = 0;
2380       swizzle[1] = 1;
2381       swizzle[2] = 2;
2382       swizzle[3] = 3;
2383       pad_inline = true; /* HACK: prevent rgbxrgbx->rgbrgbxx conversion later */
2384    }
2385 
2386    /* If 3 channels then pad to include alpha for 4 element transpose */
2387    if (dst_channels == 3) {
2388       assert (!has_alpha);
2389       for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
2390          if (swizzle[i] > TGSI_NUM_CHANNELS)
2391             swizzle[i] = 3;
2392       }
2393       if (out_format_desc->nr_channels == 4) {
2394          dst_channels = 4;
2395          /*
2396           * We use alpha from the color conversion, not separate one.
2397           * We had to include it for transpose, hence it will get converted
2398           * too (albeit when doing transpose after conversion, that would
2399           * no longer be the case necessarily).
2400           * (It works only with 4 channel dsts, e.g. rgbx formats, because
2401           * otherwise we really have padding, not alpha, included.)
2402           */
2403          has_alpha = true;
2404       }
2405    }
2406 
2407    /*
2408     * Load shader output
2409     */
2410    for (i = 0; i < num_fullblock_fs; ++i) {
2411       /* Always load alpha for use in blending */
2412       LLVMValueRef alpha;
2413       if (i < num_fs) {
2414          alpha = LLVMBuildLoad(builder, fs_out_color[rt][alpha_channel][i], "");
2415       }
2416       else {
2417          alpha = undef_src_val;
2418       }
2419 
2420       /* Load each channel */
2421       for (j = 0; j < dst_channels; ++j) {
2422          assert(swizzle[j] < 4);
2423          if (i < num_fs) {
2424             fs_src[i][j] = LLVMBuildLoad(builder, fs_out_color[rt][swizzle[j]][i], "");
2425          }
2426          else {
2427             fs_src[i][j] = undef_src_val;
2428          }
2429       }
2430 
2431       /* If 3 channels then pad to include alpha for 4 element transpose */
2432       /*
2433        * XXX If we include that here maybe could actually use it instead of
2434        * separate alpha for blending?
2435        * (Difficult though we actually convert pad channels, not alpha.)
2436        */
2437       if (dst_channels == 3 && !has_alpha) {
2438          fs_src[i][3] = alpha;
2439       }
2440 
2441       /* We split the row_mask and row_alpha as we want 128bit interleave */
2442       if (fs_type.length == 8) {
2443          src_mask[i*2 + 0]  = lp_build_extract_range(gallivm, fs_mask[i],
2444                                                      0, src_channels);
2445          src_mask[i*2 + 1]  = lp_build_extract_range(gallivm, fs_mask[i],
2446                                                      src_channels, src_channels);
2447 
2448          src_alpha[i*2 + 0] = lp_build_extract_range(gallivm, alpha, 0, src_channels);
2449          src_alpha[i*2 + 1] = lp_build_extract_range(gallivm, alpha,
2450                                                      src_channels, src_channels);
2451       } else {
2452          src_mask[i] = fs_mask[i];
2453          src_alpha[i] = alpha;
2454       }
2455    }
2456    if (dual_source_blend) {
2457       /* same as above except different src/dst, skip masks and comments... */
2458       for (i = 0; i < num_fullblock_fs; ++i) {
2459          LLVMValueRef alpha;
2460          if (i < num_fs) {
2461             alpha = LLVMBuildLoad(builder, fs_out_color[1][alpha_channel][i], "");
2462          }
2463          else {
2464             alpha = undef_src_val;
2465          }
2466 
2467          for (j = 0; j < dst_channels; ++j) {
2468             assert(swizzle[j] < 4);
2469             if (i < num_fs) {
2470                fs_src1[i][j] = LLVMBuildLoad(builder, fs_out_color[1][swizzle[j]][i], "");
2471             }
2472             else {
2473                fs_src1[i][j] = undef_src_val;
2474             }
2475          }
2476          if (dst_channels == 3 && !has_alpha) {
2477             fs_src1[i][3] = alpha;
2478          }
2479          if (fs_type.length == 8) {
2480             src1_alpha[i*2 + 0] = lp_build_extract_range(gallivm, alpha, 0, src_channels);
2481             src1_alpha[i*2 + 1] = lp_build_extract_range(gallivm, alpha,
2482                                                          src_channels, src_channels);
2483          } else {
2484             src1_alpha[i] = alpha;
2485          }
2486       }
2487    }
2488 
2489    if (util_format_is_pure_integer(out_format)) {
2490       /*
2491        * In this case fs_type was really ints or uints disguised as floats,
2492        * fix that up now.
2493        */
2494       fs_type.floating = 0;
2495       fs_type.sign = dst_type.sign;
2496       for (i = 0; i < num_fullblock_fs; ++i) {
2497          for (j = 0; j < dst_channels; ++j) {
2498             fs_src[i][j] = LLVMBuildBitCast(builder, fs_src[i][j],
2499                                             lp_build_vec_type(gallivm, fs_type), "");
2500          }
2501          if (dst_channels == 3 && !has_alpha) {
2502             fs_src[i][3] = LLVMBuildBitCast(builder, fs_src[i][3],
2503                                             lp_build_vec_type(gallivm, fs_type), "");
2504          }
2505       }
2506    }
2507 
2508    /*
2509     * We actually should generally do conversion first (for non-1d cases)
2510     * when the blend format is 8 or 16 bits. The reason is obvious,
2511     * there's 2 or 4 times less vectors to deal with for the interleave...
2512     * Albeit for the AVX (not AVX2) case there's no benefit with 16 bit
2513     * vectors (as it can do 32bit unpack with 256bit vectors, but 8/16bit
2514     * unpack only with 128bit vectors).
2515     * Note: for 16bit sizes really need matching pack conversion code
2516     */
2517    if (!is_1d && dst_channels != 3 && dst_type.width == 8) {
2518       twiddle_after_convert = TRUE;
2519    }
2520 
2521    /*
2522     * Pixel twiddle from fragment shader order to memory order
2523     */
2524    if (!twiddle_after_convert) {
2525       src_count = generate_fs_twiddle(gallivm, fs_type, num_fullblock_fs,
2526                                       dst_channels, fs_src, src, pad_inline);
2527       if (dual_source_blend) {
2528          generate_fs_twiddle(gallivm, fs_type, num_fullblock_fs, dst_channels,
2529                              fs_src1, src1, pad_inline);
2530       }
2531    } else {
2532       src_count = num_fullblock_fs * dst_channels;
2533       /*
2534        * We reorder things a bit here, so the cases for 4-wide and 8-wide
2535        * (AVX) turn out the same later when untwiddling/transpose (albeit
2536        * for true AVX2 path untwiddle needs to be different).
2537        * For now just order by colors first (so we can use unpack later).
2538        */
2539       for (j = 0; j < num_fullblock_fs; j++) {
2540          for (i = 0; i < dst_channels; i++) {
2541             src[i*num_fullblock_fs + j] = fs_src[j][i];
2542             if (dual_source_blend) {
2543                src1[i*num_fullblock_fs + j] = fs_src1[j][i];
2544             }
2545          }
2546       }
2547    }
2548 
2549    src_channels = dst_channels < 3 ? dst_channels : 4;
2550    if (src_count != num_fullblock_fs * src_channels) {
2551       unsigned ds = src_count / (num_fullblock_fs * src_channels);
2552       row_type.length /= ds;
2553       fs_type.length = row_type.length;
2554    }
2555 
2556    blend_type = row_type;
2557    mask_type.length = 4;
2558 
2559    /* Convert src to row_type */
2560    if (dual_source_blend) {
2561       struct lp_type old_row_type = row_type;
2562       lp_build_conv_auto(gallivm, fs_type, &row_type, src, src_count, src);
2563       src_count = lp_build_conv_auto(gallivm, fs_type, &old_row_type, src1, src_count, src1);
2564    }
2565    else {
2566       src_count = lp_build_conv_auto(gallivm, fs_type, &row_type, src, src_count, src);
2567    }
2568 
2569    /* If the rows are not an SSE vector, combine them to become SSE size! */
2570    if ((row_type.width * row_type.length) % 128) {
2571       unsigned bits = row_type.width * row_type.length;
2572       unsigned combined;
2573 
2574       assert(src_count >= (vector_width / bits));
2575 
2576       dst_count = src_count / (vector_width / bits);
2577 
2578       combined = lp_build_concat_n(gallivm, row_type, src, src_count, src, dst_count);
2579       if (dual_source_blend) {
2580          lp_build_concat_n(gallivm, row_type, src1, src_count, src1, dst_count);
2581       }
2582 
2583       row_type.length *= combined;
2584       src_count /= combined;
2585 
2586       bits = row_type.width * row_type.length;
2587       assert(bits == 128 || bits == 256);
2588    }
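   /*
    * Worked example (illustrative): with row_type <8 x i8> (64 bits) and
    * a 128-bit vector width, pairs of rows are concatenated into
    * <16 x i8> vectors: row_type.length doubles to 16 and src_count is
    * halved.
    */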
2589 
2590    if (twiddle_after_convert) {
2591       fs_twiddle_transpose(gallivm, row_type, src, src_count, src);
2592       if (dual_source_blend) {
2593          fs_twiddle_transpose(gallivm, row_type, src1, src_count, src1);
2594       }
2595    }
2596 
   /*
    * Blend color conversion
    */
2600    blend_color = lp_jit_context_f_blend_color(gallivm, context_ptr);
2601    blend_color = LLVMBuildPointerCast(builder, blend_color,
2602                     LLVMPointerType(lp_build_vec_type(gallivm, fs_type), 0), "");
2603    blend_color = LLVMBuildLoad(builder, LLVMBuildGEP(builder, blend_color,
2604                                &i32_zero, 1, ""), "");
2605 
2606    /* Convert */
2607    lp_build_conv(gallivm, fs_type, blend_type, &blend_color, 1, &blend_color, 1);
2608 
2609    if (out_format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
      /*
       * Since blending is done with floats, there was no conversion.
       * However, the rules for fixed-point renderbuffers still apply,
       * that is, we must clamp inputs to 0.0/1.0.
       * (This would apply to separate alpha conversion too, but we
       * currently force has_alpha to be true.)
       * TODO: we should skip this with "fake" blend, since post-blend
       * conversion will clamp anyway.
       * TODO: we could also skip this if fragment color clamping is
       * enabled. We don't support it natively, however, so it gets baked
       * into the shader and we can't really tell here.
       */
2622       struct lp_build_context f32_bld;
2623       assert(row_type.floating);
2624       lp_build_context_init(&f32_bld, gallivm, row_type);
2625       for (i = 0; i < src_count; i++) {
2626          src[i] = lp_build_clamp_zero_one_nanzero(&f32_bld, src[i]);
2627       }
2628       if (dual_source_blend) {
2629          for (i = 0; i < src_count; i++) {
2630             src1[i] = lp_build_clamp_zero_one_nanzero(&f32_bld, src1[i]);
2631          }
2632       }
      /* probably can't differ from row_type, but better safe than sorry... */
2634       lp_build_context_init(&f32_bld, gallivm, blend_type);
2635       blend_color = lp_build_clamp(&f32_bld, blend_color, f32_bld.zero, f32_bld.one);
2636    }
2637 
2638    /* Extract alpha */
2639    blend_alpha = lp_build_extract_broadcast(gallivm, blend_type, row_type, blend_color, lp_build_const_int32(gallivm, 3));
2640 
2641    /* Swizzle to appropriate channels, e.g. from RGBA to BGRA BGRA */
2642    pad_inline &= (dst_channels * (block_size / src_count) * row_type.width) != vector_width;
2643    if (pad_inline) {
2644       /* Use all 4 channels e.g. from RGBA RGBA to RGxx RGxx */
2645       blend_color = lp_build_swizzle_aos_n(gallivm, blend_color, swizzle, TGSI_NUM_CHANNELS, row_type.length);
2646    } else {
2647       /* Only use dst_channels e.g. RGBA RGBA to RG RG xxxx */
2648       blend_color = lp_build_swizzle_aos_n(gallivm, blend_color, swizzle, dst_channels, row_type.length);
2649    }
2650 
2651    /*
2652     * Mask conversion
2653     */
2654    lp_bld_quad_twiddle(gallivm, mask_type, &src_mask[0], block_height, &src_mask[0]);
2655 
2656    if (src_count < block_height) {
2657       lp_build_concat_n(gallivm, mask_type, src_mask, 4, src_mask, src_count);
2658    } else if (src_count > block_height) {
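      /*
       * Iterate downwards: each write targets src_mask[idx] while reading
       * from a lower (or equal, only at idx 0) index, so descending order
       * consumes every source vector before it gets overwritten.
       */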
2659       for (i = src_count; i > 0; --i) {
2660          unsigned pixels = block_size / src_count;
2661          unsigned idx = i - 1;
2662 
2663          src_mask[idx] = lp_build_extract_range(gallivm, src_mask[(idx * pixels) / 4],
2664                                                 (idx * pixels) % 4, pixels);
2665       }
2666    }
2667 
2668    assert(mask_type.width == 32);
2669 
2670    for (i = 0; i < src_count; ++i) {
2671       unsigned pixels = block_size / src_count;
2672       unsigned pixel_width = row_type.width * dst_channels;
2673 
2674       if (pixel_width == 24) {
2675          mask_type.width = 8;
2676          mask_type.length = vector_width / mask_type.width;
2677       } else {
2678          mask_type.length = pixels;
2679          mask_type.width = row_type.width * dst_channels;
2680 
2681          /*
2682           * If mask_type width is smaller than 32bit, this doesn't quite
2683           * generate the most efficient code (could use some pack).
2684           */
2685          src_mask[i] = LLVMBuildIntCast(builder, src_mask[i],
2686                                         lp_build_int_vec_type(gallivm, mask_type), "");
2687 
2688          mask_type.length *= dst_channels;
2689          mask_type.width /= dst_channels;
2690       }
2691 
2692       src_mask[i] = LLVMBuildBitCast(builder, src_mask[i],
2693                                      lp_build_int_vec_type(gallivm, mask_type), "");
2694       src_mask[i] = lp_build_pad_vector(gallivm, src_mask[i], row_type.length);
2695    }
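   /*
    * Illustrative example (assumed 8-bit RGBA row, pixel_width 32): each
    * pixel's 32-bit all-ones/all-zeros mask is bitcast to four i8 lanes,
    * yielding one mask byte per color channel before padding to
    * row_type.length.
    */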
2696 
2697    /*
2698     * Alpha conversion
2699     */
2700    if (!has_alpha) {
2701       struct lp_type alpha_type = fs_type;
2702       alpha_type.length = 4;
2703       convert_alpha(gallivm, row_type, alpha_type,
2704                     block_size, block_height,
2705                     src_count, dst_channels,
2706                     pad_inline, src_alpha);
2707       if (dual_source_blend) {
2708          convert_alpha(gallivm, row_type, alpha_type,
2709                        block_size, block_height,
2710                        src_count, dst_channels,
2711                        pad_inline, src1_alpha);
2712       }
2713    }
2714 
2715 
2716    /*
2717     * Load dst from memory
2718     */
2719    if (src_count < block_height) {
2720       dst_count = block_height;
2721    } else {
2722       dst_count = src_count;
2723    }
2724 
2725    dst_type.length *= block_size / dst_count;
2726 
2727    if (format_expands_to_float_soa(out_format_desc)) {
      /*
       * We need multiple values at once for the conversion, so we may as
       * well load them vectorized here too instead of concatenating later.
       * (Concatenation is still needed later for 8-wide vectors.)
       */
2733       dst_count = block_height;
2734       dst_type.length = block_width;
2735    }
2736 
   /*
    * Compute the alignment of the destination pointer in bytes.
    * We fetch 1-4 pixels; if the format has pot alignment then those
    * fetches are always aligned by MIN2(16, fetch_width), except for
    * buffers (which can't be distinguished from 1d textures here), so in
    * that case we must stick with per-pixel alignment.
    */
2744    if (is_1d) {
2745       dst_alignment = (out_format_desc->block.bits + 7)/(out_format_desc->block.width * 8);
2746    }
2747    else {
2748       dst_alignment = dst_type.length * dst_type.width / 8;
2749    }
   /* Force power-of-two alignment by extracting only the least significant set bit */
2751    dst_alignment = 1 << (ffs(dst_alignment) - 1);
2752    /*
2753     * Resource base and stride pointers are aligned to 16 bytes, so that's
2754     * the maximum alignment we can guarantee
2755     */
2756    dst_alignment = MIN2(16, dst_alignment);
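   /*
    * Worked example (illustrative): a 1d RGB8 destination yields
    * (24 + 7) / 8 = 3 bytes, which the ffs() step reduces to its lowest
    * set bit, 1 byte; a non-1d <16 x i8> row yields 16 bytes, which the
    * MIN2(16, ...) clamp leaves unchanged.
    */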
2757 
2758    ls_type = dst_type;
2759 
2760    if (dst_count > src_count) {
2761       if ((dst_type.width == 8 || dst_type.width == 16) &&
2762           util_is_power_of_two_or_zero(dst_type.length) &&
2763           dst_type.length * dst_type.width < 128) {
         /*
          * Never try to load values as 4xi8 which we will then
          * concatenate to larger vectors. This gives llvm a real
          * headache: the type legalizer (?) will try to load that as
          * 4xi8 zext to 4xi32 to fill the vector, and then the shuffles
          * to concatenate are more or less impossible - llvm is easily
          * capable of generating a sequence of 32 pextrb/pinsrb
          * instructions for that. It appears to be fixed in llvm 4.0,
          * but to avoid the trouble we load and concatenate with 32-bit
          * width instead. (16-bit seems not as bad; llvm probably
          * recognizes the load+shuffle since only one shuffle is
          * necessary, but we can do just the same anyway.)
          */
2777          ls_type.length = dst_type.length * dst_type.width / 32;
2778          ls_type.width = 32;
2779       }
2780    }
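   /*
    * E.g. (illustrative): dst_type <4 x i8> becomes ls_type <1 x i32>:
    * the same 32 bits per row, but loaded in a form that llvm
    * concatenates cheaply.
    */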
2781 
2782    if (is_1d) {
2783       load_unswizzled_block(gallivm, color_ptr, stride, block_width, 1,
2784                             dst, ls_type, dst_count / 4, dst_alignment, NULL, NULL, false);
2785       for (i = dst_count / 4; i < dst_count; i++) {
2786          dst[i] = lp_build_undef(gallivm, ls_type);
2787       }
2788 
2789    }
2790    else {
2791       load_unswizzled_block(gallivm, color_ptr, stride, block_width, block_height,
2792                             dst, ls_type, dst_count, dst_alignment, NULL, NULL, false);
2793    }
2794 
2795 
   /*
    * Convert from dst/output format to src/blending format.
    *
    * This is necessary as we can only read 1 row from memory at a time,
    * so the minimum dst_count at this point is 4.
    *
    * With, for example, an R8 format all 16 pixels fit in a 128-bit
    * vector: this takes the 4 dsts and combines them into 1 src so we
    * can perform blending on all 16 pixels in that single vector at once.
    */
2806    if (dst_count > src_count) {
2807       if (ls_type.length != dst_type.length && ls_type.length == 1) {
2808          LLVMTypeRef elem_type = lp_build_elem_type(gallivm, ls_type);
2809          LLVMTypeRef ls_vec_type = LLVMVectorType(elem_type, 1);
2810          for (i = 0; i < dst_count; i++) {
2811             dst[i] = LLVMBuildBitCast(builder, dst[i], ls_vec_type, "");
2812          }
2813       }
2814 
2815       lp_build_concat_n(gallivm, ls_type, dst, 4, dst, src_count);
2816 
2817       if (ls_type.length != dst_type.length) {
2818          struct lp_type tmp_type = dst_type;
2819          tmp_type.length = dst_type.length * 4 / src_count;
2820          for (i = 0; i < src_count; i++) {
2821             dst[i] = LLVMBuildBitCast(builder, dst[i],
2822                                       lp_build_vec_type(gallivm, tmp_type), "");
2823          }
2824       }
2825    }
2826 
2827    /*
2828     * Blending
2829     */
   /* XXX this is broken for RGB8 formats -
    * they get expanded from 12 to 16 elements (to include alpha)
    * by convert_to_blend_type then reduced to 15 instead of 12
    * by convert_from_blend_type (a simple fix though breaks A8...).
    * R16G16B16 also crashes, though differently; something seems to go
    * wrong inside llvm's handling of npot vector sizes.
    * Some cleanup could be done here (like skipping conversion/blend
    * when not needed).
    */
2839    convert_to_blend_type(gallivm, block_size, out_format_desc, dst_type,
2840                          row_type, dst, src_count);
2841 
   /*
    * FIXME: Really should get logic ops / masks out of the generic blend
    * / row format. Logic ops will definitely not work on the blend float
    * format used for SRGB here, and I think OpenGL expects them to work
    * (that is, incoming values converted to srgb, then the logic op
    * applied).
    */
2848    for (i = 0; i < src_count; ++i) {
2849       dst[i] = lp_build_blend_aos(gallivm,
2850                                   &variant->key.blend,
2851                                   out_format,
2852                                   row_type,
2853                                   rt,
2854                                   src[i],
2855                                   has_alpha ? NULL : src_alpha[i],
2856                                   src1[i],
2857                                   has_alpha ? NULL : src1_alpha[i],
2858                                   dst[i],
2859                                   partial_mask ? src_mask[i] : NULL,
2860                                   blend_color,
2861                                   has_alpha ? NULL : blend_alpha,
2862                                   swizzle,
2863                                   pad_inline ? 4 : dst_channels);
2864    }
2865 
2866    convert_from_blend_type(gallivm, block_size, out_format_desc,
2867                            row_type, dst_type, dst, src_count);
2868 
2869    /* Split the blend rows back to memory rows */
2870    if (dst_count > src_count) {
2871       row_type.length = dst_type.length * (dst_count / src_count);
2872 
2873       if (src_count == 1) {
2874          dst[1] = lp_build_extract_range(gallivm, dst[0], row_type.length / 2, row_type.length / 2);
2875          dst[0] = lp_build_extract_range(gallivm, dst[0], 0, row_type.length / 2);
2876 
2877          row_type.length /= 2;
2878          src_count *= 2;
2879       }
2880 
2881       dst[3] = lp_build_extract_range(gallivm, dst[1], row_type.length / 2, row_type.length / 2);
2882       dst[2] = lp_build_extract_range(gallivm, dst[1], 0, row_type.length / 2);
2883       dst[1] = lp_build_extract_range(gallivm, dst[0], row_type.length / 2, row_type.length / 2);
2884       dst[0] = lp_build_extract_range(gallivm, dst[0], 0, row_type.length / 2);
2885 
2886       row_type.length /= 2;
2887       src_count *= 2;
2888    }
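   /*
    * E.g. (illustrative): a single <16 x i8> blend row for an R8 target
    * is split over two halving passes back into the four 4-byte rows
    * matching the memory layout.
    */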
2889 
2890    /*
2891     * Store blend result to memory
2892     */
2893    if (is_1d) {
2894       store_unswizzled_block(gallivm, color_ptr, stride, block_width, 1,
2895                              dst, dst_type, dst_count / 4, dst_alignment);
2896    }
2897    else {
2898       store_unswizzled_block(gallivm, color_ptr, stride, block_width, block_height,
2899                              dst, dst_type, dst_count, dst_alignment);
2900    }
2901 
2902    if (have_smallfloat_format(dst_type, out_format)) {
2903       lp_build_fpstate_set(gallivm, fpstate);
2904    }
2905 
2906    if (do_branch) {
2907       lp_build_mask_end(&mask_ctx);
2908    }
2909 }
2910 
2911 
2912 /**
2913  * Generate the runtime callable function for the whole fragment pipeline.
2914  * Note that the function which we generate operates on a block of 16
 * pixels at a time.  The block contains 2x2 quads.  Each quad contains
2916  * 2x2 pixels.
2917  */
2918 static void
generate_fragment(struct llvmpipe_context *lp,
2920                   struct lp_fragment_shader *shader,
2921                   struct lp_fragment_shader_variant *variant,
2922                   unsigned partial_mask)
2923 {
2924    struct gallivm_state *gallivm = variant->gallivm;
2925    struct lp_fragment_shader_variant_key *key = &variant->key;
2926    struct lp_shader_input inputs[PIPE_MAX_SHADER_INPUTS];
2927    char func_name[64];
2928    struct lp_type fs_type;
2929    struct lp_type blend_type;
2930    LLVMTypeRef fs_elem_type;
2931    LLVMTypeRef blend_vec_type;
2932    LLVMTypeRef arg_types[15];
2933    LLVMTypeRef func_type;
2934    LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
2935    LLVMTypeRef int8_type = LLVMInt8TypeInContext(gallivm->context);
2936    LLVMValueRef context_ptr;
2937    LLVMValueRef x;
2938    LLVMValueRef y;
2939    LLVMValueRef a0_ptr;
2940    LLVMValueRef dadx_ptr;
2941    LLVMValueRef dady_ptr;
2942    LLVMValueRef color_ptr_ptr;
2943    LLVMValueRef stride_ptr;
2944    LLVMValueRef color_sample_stride_ptr;
2945    LLVMValueRef depth_ptr;
2946    LLVMValueRef depth_stride;
2947    LLVMValueRef depth_sample_stride;
2948    LLVMValueRef mask_input;
2949    LLVMValueRef thread_data_ptr;
2950    LLVMBasicBlockRef block;
2951    LLVMBuilderRef builder;
2952    struct lp_build_sampler_soa *sampler;
2953    struct lp_build_image_soa *image;
2954    struct lp_build_interp_soa_context interp;
2955    LLVMValueRef fs_mask[(16 / 4) * LP_MAX_SAMPLES];
2956    LLVMValueRef fs_out_color[LP_MAX_SAMPLES][PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS][16 / 4];
2957    LLVMValueRef function;
2958    LLVMValueRef facing;
2959    unsigned num_fs;
2960    unsigned i;
2961    unsigned chan;
2962    unsigned cbuf;
2963    boolean cbuf0_write_all;
2964    const boolean dual_source_blend = key->blend.rt[0].blend_enable &&
2965                                      util_blend_state_is_dual(&key->blend, 0);
2966 
2967    assert(lp_native_vector_width / 32 >= 4);
2968 
2969    /* Adjust color input interpolation according to flatshade state:
2970     */
2971    memcpy(inputs, shader->inputs, shader->info.base.num_inputs * sizeof inputs[0]);
2972    for (i = 0; i < shader->info.base.num_inputs; i++) {
2973       if (inputs[i].interp == LP_INTERP_COLOR) {
         if (key->flatshade)
            inputs[i].interp = LP_INTERP_CONSTANT;
         else
            inputs[i].interp = LP_INTERP_PERSPECTIVE;
2978       }
2979    }
2980 
2981    /* check if writes to cbuf[0] are to be copied to all cbufs */
2982    cbuf0_write_all =
2983      shader->info.base.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS];
2984 
2985    /* TODO: actually pick these based on the fs and color buffer
2986     * characteristics. */
2987 
2988    memset(&fs_type, 0, sizeof fs_type);
2989    fs_type.floating = TRUE;      /* floating point values */
2990    fs_type.sign = TRUE;          /* values are signed */
2991    fs_type.norm = FALSE;         /* values are not limited to [0,1] or [-1,1] */
2992    fs_type.width = 32;           /* 32-bit float */
2993    fs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */
2994 
2995    memset(&blend_type, 0, sizeof blend_type);
2996    blend_type.floating = FALSE; /* values are integers */
2997    blend_type.sign = FALSE;     /* values are unsigned */
2998    blend_type.norm = TRUE;      /* values are in [0,1] or [-1,1] */
2999    blend_type.width = 8;        /* 8-bit ubyte values */
3000    blend_type.length = 16;      /* 16 elements per vector */
3001 
3002    /*
3003     * Generate the function prototype. Any change here must be reflected in
3004     * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
3005     */
3006 
3007    fs_elem_type = lp_build_elem_type(gallivm, fs_type);
3008 
3009    blend_vec_type = lp_build_vec_type(gallivm, blend_type);
3010 
3011    snprintf(func_name, sizeof(func_name), "fs_variant_%s",
3012             partial_mask ? "partial" : "whole");
3013 
3014    arg_types[0] = variant->jit_context_ptr_type;       /* context */
3015    arg_types[1] = int32_type;                          /* x */
3016    arg_types[2] = int32_type;                          /* y */
3017    arg_types[3] = int32_type;                          /* facing */
3018    arg_types[4] = LLVMPointerType(fs_elem_type, 0);    /* a0 */
3019    arg_types[5] = LLVMPointerType(fs_elem_type, 0);    /* dadx */
3020    arg_types[6] = LLVMPointerType(fs_elem_type, 0);    /* dady */
3021    arg_types[7] = LLVMPointerType(LLVMPointerType(int8_type, 0), 0);  /* color */
3022    arg_types[8] = LLVMPointerType(int8_type, 0);       /* depth */
3023    arg_types[9] = LLVMInt64TypeInContext(gallivm->context);  /* mask_input */
3024    arg_types[10] = variant->jit_thread_data_ptr_type;  /* per thread data */
3025    arg_types[11] = LLVMPointerType(int32_type, 0);     /* stride */
3026    arg_types[12] = int32_type;                         /* depth_stride */
3027    arg_types[13] = LLVMPointerType(int32_type, 0);     /* color sample strides */
3028    arg_types[14] = int32_type;                         /* depth sample stride */
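   /*
    * For reference, a rough C-level sketch of the signature being built
    * here (lp_jit.h's lp_jit_frag_func is the authoritative definition;
    * parameter names below just mirror the comments above):
    *
    *    void fs_variant(struct lp_jit_context *context,
    *                    uint32_t x, uint32_t y, uint32_t facing,
    *                    const float *a0, const float *dadx, const float *dady,
    *                    uint8_t **color, uint8_t *depth, uint64_t mask_input,
    *                    struct lp_jit_thread_data *thread_data,
    *                    uint32_t *stride, uint32_t depth_stride,
    *                    uint32_t *color_sample_stride,
    *                    uint32_t depth_sample_stride);
    */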
3029 
3030    func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
3031                                 arg_types, ARRAY_SIZE(arg_types), 0);
3032 
3033    function = LLVMAddFunction(gallivm->module, func_name, func_type);
3034    LLVMSetFunctionCallConv(function, LLVMCCallConv);
3035 
3036    variant->function[partial_mask] = function;
3037 
3038    /* XXX: need to propagate noalias down into color param now we are
3039     * passing a pointer-to-pointer?
3040     */
3041    for(i = 0; i < ARRAY_SIZE(arg_types); ++i)
3042       if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
3043          lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
3044 
3045    if (variant->gallivm->cache->data_size)
3046       return;
3047 
3048    context_ptr  = LLVMGetParam(function, 0);
3049    x            = LLVMGetParam(function, 1);
3050    y            = LLVMGetParam(function, 2);
3051    facing       = LLVMGetParam(function, 3);
3052    a0_ptr       = LLVMGetParam(function, 4);
3053    dadx_ptr     = LLVMGetParam(function, 5);
3054    dady_ptr     = LLVMGetParam(function, 6);
3055    color_ptr_ptr = LLVMGetParam(function, 7);
3056    depth_ptr    = LLVMGetParam(function, 8);
3057    mask_input   = LLVMGetParam(function, 9);
3058    thread_data_ptr  = LLVMGetParam(function, 10);
3059    stride_ptr   = LLVMGetParam(function, 11);
3060    depth_stride = LLVMGetParam(function, 12);
3061    color_sample_stride_ptr = LLVMGetParam(function, 13);
3062    depth_sample_stride = LLVMGetParam(function, 14);
3063 
3064    lp_build_name(context_ptr, "context");
3065    lp_build_name(x, "x");
3066    lp_build_name(y, "y");
3067    lp_build_name(a0_ptr, "a0");
3068    lp_build_name(dadx_ptr, "dadx");
3069    lp_build_name(dady_ptr, "dady");
3070    lp_build_name(color_ptr_ptr, "color_ptr_ptr");
3071    lp_build_name(depth_ptr, "depth");
3072    lp_build_name(mask_input, "mask_input");
3073    lp_build_name(thread_data_ptr, "thread_data");
3074    lp_build_name(stride_ptr, "stride_ptr");
3075    lp_build_name(depth_stride, "depth_stride");
3076    lp_build_name(color_sample_stride_ptr, "color_sample_stride_ptr");
3077    lp_build_name(depth_sample_stride, "depth_sample_stride");
3078 
3079    /*
3080     * Function body
3081     */
3082 
3083    block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
3084    builder = gallivm->builder;
3085    assert(builder);
3086    LLVMPositionBuilderAtEnd(builder, block);
3087 
3088    /*
3089     * Must not count ps invocations if there's a null shader.
    * (It would be ok to count with a null shader if there are d/s tests,
    * but only if there are d/s buffers too, which is different
    * from implicit rasterization disable, which must not depend
    * on the d/s buffers.)
3094     * Could use popcount on mask, but pixel accuracy is not required.
3095     * Could disable if there's no stats query, but maybe not worth it.
3096     */
3097    if (shader->info.base.num_instructions > 1) {
3098       LLVMValueRef invocs, val;
3099       invocs = lp_jit_thread_data_invocations(gallivm, thread_data_ptr);
3100       val = LLVMBuildLoad(builder, invocs, "");
3101       val = LLVMBuildAdd(builder, val,
3102                          LLVMConstInt(LLVMInt64TypeInContext(gallivm->context), 1, 0),
3103                          "invoc_count");
3104       LLVMBuildStore(builder, val, invocs);
3105    }
3106 
3107    /* code generated texture sampling */
3108    sampler = lp_llvm_sampler_soa_create(key->samplers, key->nr_samplers);
3109    image = lp_llvm_image_soa_create(lp_fs_variant_key_images(key), key->nr_images);
3110 
3111    num_fs = 16 / fs_type.length; /* number of loops per 4x4 stamp */
3112    /* for 1d resources only run "upper half" of stamp */
3113    if (key->resource_1d)
3114       num_fs /= 2;
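   /*
    * E.g. (illustrative): with 8-wide vectors (fs_type.length == 8),
    * num_fs == 2, so two loop iterations cover the 16-pixel stamp; for
    * 1d resources only one iteration remains.
    */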
3115 
3116    {
3117       LLVMValueRef num_loop = lp_build_const_int32(gallivm, num_fs);
3118       LLVMTypeRef mask_type = lp_build_int_vec_type(gallivm, fs_type);
3119       LLVMValueRef num_loop_samp = lp_build_const_int32(gallivm, num_fs * key->coverage_samples);
3120       LLVMValueRef mask_store = lp_build_array_alloca(gallivm, mask_type,
3121                                                       num_loop_samp, "mask_store");
3122 
3123       LLVMTypeRef flt_type = LLVMFloatTypeInContext(gallivm->context);
3124       LLVMValueRef glob_sample_pos = LLVMAddGlobal(gallivm->module, LLVMArrayType(flt_type, key->coverage_samples * 2), "");
3125       LLVMValueRef sample_pos_array;
3126 
3127       if (key->multisample && key->coverage_samples == 4) {
3128          LLVMValueRef sample_pos_arr[8];
3129          for (unsigned i = 0; i < 4; i++) {
3130             sample_pos_arr[i * 2] = LLVMConstReal(flt_type, lp_sample_pos_4x[i][0]);
3131             sample_pos_arr[i * 2 + 1] = LLVMConstReal(flt_type, lp_sample_pos_4x[i][1]);
3132          }
3133          sample_pos_array = LLVMConstArray(LLVMFloatTypeInContext(gallivm->context), sample_pos_arr, 8);
3134       } else {
3135          LLVMValueRef sample_pos_arr[2];
3136          sample_pos_arr[0] = LLVMConstReal(flt_type, 0.5);
3137          sample_pos_arr[1] = LLVMConstReal(flt_type, 0.5);
3138          sample_pos_array = LLVMConstArray(LLVMFloatTypeInContext(gallivm->context), sample_pos_arr, 2);
3139       }
3140       LLVMSetInitializer(glob_sample_pos, sample_pos_array);
3141 
3142       LLVMValueRef color_store[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS];
3143       boolean pixel_center_integer =
3144          shader->info.base.properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER];
3145 
3146       /*
       * The shader input interpolation info is not explicitly baked into the
3148        * shader key, but everything it derives from (TGSI, and flatshade) is
3149        * already included in the shader key.
3150        */
3151       lp_build_interp_soa_init(&interp,
3152                                gallivm,
3153                                shader->info.base.num_inputs,
3154                                inputs,
3155                                pixel_center_integer,
3156                                key->coverage_samples, glob_sample_pos,
3157                                num_loop,
3158                                key->depth_clamp,
3159                                builder, fs_type,
3160                                a0_ptr, dadx_ptr, dady_ptr,
3161                                x, y);
3162 
3163       for (i = 0; i < num_fs; i++) {
3164          if (key->multisample) {
3165             LLVMValueRef smask_val = LLVMBuildLoad(builder, lp_jit_context_sample_mask(gallivm, context_ptr), "");
3166 
            /*
             * For multisampling, extract the per-sample mask from the
             * incoming 64-bit mask and store it to the per-sample mask
             * storage; all of them ORed together generate the fragment
             * shader mask (sample shading TODO).
             * Take the incoming state coverage mask into account.
             */
3173             for (unsigned s = 0; s < key->coverage_samples; s++) {
3174                LLVMValueRef sindexi = lp_build_const_int32(gallivm, i + (s * num_fs));
3175                LLVMValueRef sample_mask_ptr = LLVMBuildGEP(builder, mask_store,
3176                                                            &sindexi, 1, "sample_mask_ptr");
3177                LLVMValueRef s_mask = generate_quad_mask(gallivm, fs_type,
3178                                                         i*fs_type.length/4, s, mask_input);
3179 
3180                LLVMValueRef smask_bit = LLVMBuildAnd(builder, smask_val, lp_build_const_int32(gallivm, (1 << s)), "");
3181                LLVMValueRef cmp = LLVMBuildICmp(builder, LLVMIntNE, smask_bit, lp_build_const_int32(gallivm, 0), "");
3182                smask_bit = LLVMBuildSExt(builder, cmp, int32_type, "");
3183                smask_bit = lp_build_broadcast(gallivm, mask_type, smask_bit);
3184 
3185                s_mask = LLVMBuildAnd(builder, s_mask, smask_bit, "");
3186                LLVMBuildStore(builder, s_mask, sample_mask_ptr);
3187             }
3188          } else {
3189             LLVMValueRef mask;
3190             LLVMValueRef indexi = lp_build_const_int32(gallivm, i);
3191             LLVMValueRef mask_ptr = LLVMBuildGEP(builder, mask_store,
3192                                                  &indexi, 1, "mask_ptr");
3193 
3194             if (partial_mask) {
3195                mask = generate_quad_mask(gallivm, fs_type,
3196                                          i*fs_type.length/4, 0, mask_input);
3197             }
3198             else {
3199                mask = lp_build_const_int_vec(gallivm, fs_type, ~0);
3200             }
3201             LLVMBuildStore(builder, mask, mask_ptr);
3202          }
3203       }
3204 
3205       generate_fs_loop(gallivm,
3206                        shader, key,
3207                        builder,
3208                        fs_type,
3209                        context_ptr,
3210                        glob_sample_pos,
3211                        num_loop,
3212                        &interp,
3213                        sampler,
3214                        image,
3215                        mask_store, /* output */
3216                        color_store,
3217                        depth_ptr,
3218                        depth_stride,
3219                        depth_sample_stride,
3220                        color_ptr_ptr,
3221                        stride_ptr,
3222                        color_sample_stride_ptr,
3223                        facing,
3224                        thread_data_ptr);
3225 
3226       for (i = 0; i < num_fs; i++) {
3227          LLVMValueRef ptr;
3228          for (unsigned s = 0; s < key->coverage_samples; s++) {
3229             int idx = (i + (s * num_fs));
3230             LLVMValueRef sindexi = lp_build_const_int32(gallivm, idx);
3231             ptr = LLVMBuildGEP(builder, mask_store, &sindexi, 1, "");
3232 
3233             fs_mask[idx] = LLVMBuildLoad(builder, ptr, "smask");
3234          }
3235 
3236          for (unsigned s = 0; s < key->min_samples; s++) {
            /* FIXME: this is convoluted; these loops need reorganizing */
3238             int idx = s * num_fs + i;
3239             LLVMValueRef sindexi = lp_build_const_int32(gallivm, idx);
3240             for (cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
3241                for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
3242                   ptr = LLVMBuildGEP(builder,
3243                                      color_store[cbuf * !cbuf0_write_all][chan],
3244                                      &sindexi, 1, "");
3245                   fs_out_color[s][cbuf][chan][i] = ptr;
3246                }
3247             }
3248             if (dual_source_blend) {
               /* we only support one dual-source blend target, hence always use output 1 */
3250                for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
3251                   ptr = LLVMBuildGEP(builder,
3252                                      color_store[1][chan],
3253                                      &sindexi, 1, "");
3254                   fs_out_color[s][1][chan][i] = ptr;
3255                }
3256             }
3257          }
3258       }
3259    }
3260 
3261    sampler->destroy(sampler);
3262    image->destroy(image);
3263    /* Loop over color outputs / color buffers to do blending.
3264     */
3265    for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
3266       if (key->cbuf_format[cbuf] != PIPE_FORMAT_NONE) {
3267          LLVMValueRef color_ptr;
3268          LLVMValueRef stride;
3269          LLVMValueRef sample_stride = NULL;
3270          LLVMValueRef index = lp_build_const_int32(gallivm, cbuf);
3271 
3272          boolean do_branch = ((key->depth.enabled
3273                                || key->stencil[0].enabled
3274                                || key->alpha.enabled)
3275                               && !shader->info.base.uses_kill);
3276 
3277          color_ptr = LLVMBuildLoad(builder,
3278                                    LLVMBuildGEP(builder, color_ptr_ptr,
3279                                                 &index, 1, ""),
3280                                    "");
3281 
3282          stride = LLVMBuildLoad(builder,
3283                                 LLVMBuildGEP(builder, stride_ptr, &index, 1, ""),
3284                                 "");
3285 
3286          if (key->multisample)
3287             sample_stride = LLVMBuildLoad(builder,
3288                                           LLVMBuildGEP(builder, color_sample_stride_ptr,
3289                                                        &index, 1, ""), "");
3290 
3291          for (unsigned s = 0; s < key->cbuf_nr_samples[cbuf]; s++) {
3292             unsigned mask_idx = num_fs * (key->multisample ? s : 0);
3293             unsigned out_idx = key->min_samples == 1 ? 0 : s;
            LLVMValueRef out_ptr = color_ptr;
3295 
3296             if (key->multisample) {
3297                LLVMValueRef sample_offset = LLVMBuildMul(builder, sample_stride, lp_build_const_int32(gallivm, s), "");
3298                out_ptr = LLVMBuildGEP(builder, out_ptr, &sample_offset, 1, "");
3299             }
3300             out_ptr = LLVMBuildBitCast(builder, out_ptr, LLVMPointerType(blend_vec_type, 0), "");
3301 
3302             lp_build_name(out_ptr, "color_ptr%d", cbuf);
3303 
3304             generate_unswizzled_blend(gallivm, cbuf, variant,
3305                                       key->cbuf_format[cbuf],
3306                                       num_fs, fs_type, &fs_mask[mask_idx], fs_out_color[out_idx],
3307                                       context_ptr, out_ptr, stride,
3308                                       partial_mask, do_branch);
3309          }
3310       }
3311    }
3312 
3313    LLVMBuildRetVoid(builder);
3314 
3315    gallivm_verify_function(gallivm, function);
3316 }
3317 
3318 
3319 static void
dump_fs_variant_key(struct lp_fragment_shader_variant_key *key)
3321 {
3322    unsigned i;
3323 
3324    debug_printf("fs variant %p:\n", (void *) key);
3325 
3326    if (key->flatshade) {
3327       debug_printf("flatshade = 1\n");
3328    }
3329    if (key->multisample) {
3330       debug_printf("multisample = 1\n");
3331       debug_printf("coverage samples = %d\n", key->coverage_samples);
3332       debug_printf("min samples = %d\n", key->min_samples);
3333    }
3334    for (i = 0; i < key->nr_cbufs; ++i) {
3335       debug_printf("cbuf_format[%u] = %s\n", i, util_format_name(key->cbuf_format[i]));
3336       debug_printf("cbuf nr_samples[%u] = %d\n", i, key->cbuf_nr_samples[i]);
3337    }
3338    if (key->depth.enabled || key->stencil[0].enabled) {
3339       debug_printf("depth.format = %s\n", util_format_name(key->zsbuf_format));
3340       debug_printf("depth nr_samples = %d\n", key->zsbuf_nr_samples);
3341    }
3342    if (key->depth.enabled) {
3343       debug_printf("depth.func = %s\n", util_str_func(key->depth.func, TRUE));
3344       debug_printf("depth.writemask = %u\n", key->depth.writemask);
3345    }
3346 
3347    for (i = 0; i < 2; ++i) {
3348       if (key->stencil[i].enabled) {
3349          debug_printf("stencil[%u].func = %s\n", i, util_str_func(key->stencil[i].func, TRUE));
3350          debug_printf("stencil[%u].fail_op = %s\n", i, util_str_stencil_op(key->stencil[i].fail_op, TRUE));
3351          debug_printf("stencil[%u].zpass_op = %s\n", i, util_str_stencil_op(key->stencil[i].zpass_op, TRUE));
3352          debug_printf("stencil[%u].zfail_op = %s\n", i, util_str_stencil_op(key->stencil[i].zfail_op, TRUE));
3353          debug_printf("stencil[%u].valuemask = 0x%x\n", i, key->stencil[i].valuemask);
3354          debug_printf("stencil[%u].writemask = 0x%x\n", i, key->stencil[i].writemask);
3355       }
3356    }
3357 
3358    if (key->alpha.enabled) {
3359       debug_printf("alpha.func = %s\n", util_str_func(key->alpha.func, TRUE));
3360    }
3361 
3362    if (key->occlusion_count) {
3363       debug_printf("occlusion_count = 1\n");
3364    }
3365 
3366    if (key->blend.logicop_enable) {
3367       debug_printf("blend.logicop_func = %s\n", util_str_logicop(key->blend.logicop_func, TRUE));
3368    }
3369    else if (key->blend.rt[0].blend_enable) {
3370       debug_printf("blend.rgb_func = %s\n",   util_str_blend_func  (key->blend.rt[0].rgb_func, TRUE));
3371       debug_printf("blend.rgb_src_factor = %s\n",   util_str_blend_factor(key->blend.rt[0].rgb_src_factor, TRUE));
3372       debug_printf("blend.rgb_dst_factor = %s\n",   util_str_blend_factor(key->blend.rt[0].rgb_dst_factor, TRUE));
3373       debug_printf("blend.alpha_func = %s\n",       util_str_blend_func  (key->blend.rt[0].alpha_func, TRUE));
3374       debug_printf("blend.alpha_src_factor = %s\n", util_str_blend_factor(key->blend.rt[0].alpha_src_factor, TRUE));
3375       debug_printf("blend.alpha_dst_factor = %s\n", util_str_blend_factor(key->blend.rt[0].alpha_dst_factor, TRUE));
3376    }
3377    debug_printf("blend.colormask = 0x%x\n", key->blend.rt[0].colormask);
3378    if (key->blend.alpha_to_coverage) {
3379       debug_printf("blend.alpha_to_coverage is enabled\n");
3380    }
3381    for (i = 0; i < key->nr_samplers; ++i) {
3382       const struct lp_static_sampler_state *sampler = &key->samplers[i].sampler_state;
3383       debug_printf("sampler[%u] = \n", i);
3384       debug_printf("  .wrap = %s %s %s\n",
3385                    util_str_tex_wrap(sampler->wrap_s, TRUE),
3386                    util_str_tex_wrap(sampler->wrap_t, TRUE),
3387                    util_str_tex_wrap(sampler->wrap_r, TRUE));
3388       debug_printf("  .min_img_filter = %s\n",
3389                    util_str_tex_filter(sampler->min_img_filter, TRUE));
3390       debug_printf("  .min_mip_filter = %s\n",
3391                    util_str_tex_mipfilter(sampler->min_mip_filter, TRUE));
3392       debug_printf("  .mag_img_filter = %s\n",
3393                    util_str_tex_filter(sampler->mag_img_filter, TRUE));
3394       if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE)
3395          debug_printf("  .compare_func = %s\n", util_str_func(sampler->compare_func, TRUE));
3396       debug_printf("  .normalized_coords = %u\n", sampler->normalized_coords);
3397       debug_printf("  .min_max_lod_equal = %u\n", sampler->min_max_lod_equal);
3398       debug_printf("  .lod_bias_non_zero = %u\n", sampler->lod_bias_non_zero);
3399       debug_printf("  .apply_min_lod = %u\n", sampler->apply_min_lod);
3400       debug_printf("  .apply_max_lod = %u\n", sampler->apply_max_lod);
3401    }
3402    for (i = 0; i < key->nr_sampler_views; ++i) {
3403       const struct lp_static_texture_state *texture = &key->samplers[i].texture_state;
3404       debug_printf("texture[%u] = \n", i);
3405       debug_printf("  .format = %s\n",
3406                    util_format_name(texture->format));
3407       debug_printf("  .target = %s\n",
3408                    util_str_tex_target(texture->target, TRUE));
3409       debug_printf("  .level_zero_only = %u\n",
3410                    texture->level_zero_only);
3411       debug_printf("  .pot = %u %u %u\n",
3412                    texture->pot_width,
3413                    texture->pot_height,
3414                    texture->pot_depth);
3415    }
3416    struct lp_image_static_state *images = lp_fs_variant_key_images(key);
3417    for (i = 0; i < key->nr_images; ++i) {
3418       const struct lp_static_texture_state *image = &images[i].image_state;
3419       debug_printf("image[%u] = \n", i);
3420       debug_printf("  .format = %s\n",
3421                    util_format_name(image->format));
3422       debug_printf("  .target = %s\n",
3423                    util_str_tex_target(image->target, TRUE));
3424       debug_printf("  .level_zero_only = %u\n",
3425                    image->level_zero_only);
3426       debug_printf("  .pot = %u %u %u\n",
3427                    image->pot_width,
3428                    image->pot_height,
3429                    image->pot_depth);
3430    }
3431 }
3432 
3433 
3434 void
lp_debug_fs_variant(struct lp_fragment_shader_variant *variant)
3436 {
3437    debug_printf("llvmpipe: Fragment shader #%u variant #%u:\n",
3438                 variant->shader->no, variant->no);
3439    if (variant->shader->base.type == PIPE_SHADER_IR_TGSI)
3440       tgsi_dump(variant->shader->base.tokens, 0);
3441    else
3442       nir_print_shader(variant->shader->base.ir.nir, stderr);
3443    dump_fs_variant_key(&variant->key);
3444    debug_printf("variant->opaque = %u\n", variant->opaque);
3445    debug_printf("\n");
3446 }
3447 
3448 static void
lp_fs_get_ir_cache_key(struct lp_fragment_shader_variant *variant,
                       unsigned char ir_sha1_cache_key[20])
3451 {
3452    struct blob blob = { 0 };
3453    unsigned ir_size;
3454    void *ir_binary;
3455 
3456    blob_init(&blob);
3457    nir_serialize(&blob, variant->shader->base.ir.nir, true);
3458    ir_binary = blob.data;
3459    ir_size = blob.size;
3460 
3461    struct mesa_sha1 ctx;
3462    _mesa_sha1_init(&ctx);
3463    _mesa_sha1_update(&ctx, &variant->key, variant->shader->variant_key_size);
3464    _mesa_sha1_update(&ctx, ir_binary, ir_size);
3465    _mesa_sha1_final(&ctx, ir_sha1_cache_key);
3466 
3467    blob_finish(&blob);
3468 }
3469 
3470 /**
3471  * Generate a new fragment shader variant from the shader code and
3472  * other state indicated by the key.
3473  */
3474 static struct lp_fragment_shader_variant *
generate_variant(struct llvmpipe_context *lp,
3476                  struct lp_fragment_shader *shader,
3477                  const struct lp_fragment_shader_variant_key *key)
3478 {
3479    struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
3480    struct lp_fragment_shader_variant *variant;
3481    const struct util_format_description *cbuf0_format_desc = NULL;
3482    boolean fullcolormask;
3483    char module_name[64];
3484    unsigned char ir_sha1_cache_key[20];
3485    struct lp_cached_code cached = { 0 };
3486    bool needs_caching = false;
3487    variant = MALLOC(sizeof *variant + shader->variant_key_size - sizeof variant->key);
3488    if (!variant)
3489       return NULL;
3490 
3491    memset(variant, 0, sizeof(*variant));
3492    snprintf(module_name, sizeof(module_name), "fs%u_variant%u",
3493             shader->no, shader->variants_created);
3494 
3495    pipe_reference_init(&variant->reference, 1);
3496    lp_fs_reference(lp, &variant->shader, shader);
3497 
3498    memcpy(&variant->key, key, shader->variant_key_size);
3499 
3500    if (shader->base.ir.nir) {
3501       lp_fs_get_ir_cache_key(variant, ir_sha1_cache_key);
3502 
3503       lp_disk_cache_find_shader(screen, &cached, ir_sha1_cache_key);
3504       if (!cached.data_size)
3505          needs_caching = true;
3506    }
3507    variant->gallivm = gallivm_create(module_name, lp->context, &cached);
3508    if (!variant->gallivm) {
3509       FREE(variant);
3510       return NULL;
3511    }
3512 
3513    variant->list_item_global.base = variant;
3514    variant->list_item_local.base = variant;
3515    variant->no = shader->variants_created++;
3516 
3519    /*
3520     * Determine whether we are touching all channels in the color buffer.
3521     */
3522    fullcolormask = FALSE;
3523    if (key->nr_cbufs == 1) {
3524       cbuf0_format_desc = util_format_description(key->cbuf_format[0]);
3525       fullcolormask = util_format_colormask_full(cbuf0_format_desc, key->blend.rt[0].colormask);
3526    }
3527 
3528    variant->opaque =
3529          !key->blend.logicop_enable &&
3530          !key->blend.rt[0].blend_enable &&
3531          fullcolormask &&
3532          !key->stencil[0].enabled &&
3533          !key->alpha.enabled &&
3534          !key->multisample &&
3535          !key->blend.alpha_to_coverage &&
3536          !key->depth.enabled &&
3537          !shader->info.base.uses_kill &&
3538          !shader->info.base.writes_samplemask
3539       ? TRUE : FALSE;
3540 
3541    if ((LP_DEBUG & DEBUG_FS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
3542       lp_debug_fs_variant(variant);
3543    }
3544 
3545    lp_jit_init_types(variant);
3546 
3547    if (variant->jit_function[RAST_EDGE_TEST] == NULL)
3548       generate_fragment(lp, shader, variant, RAST_EDGE_TEST);
3549 
3550    if (variant->jit_function[RAST_WHOLE] == NULL) {
3551       if (variant->opaque) {
3552          /* Specialized shader, which doesn't need to read the color buffer. */
3553          generate_fragment(lp, shader, variant, RAST_WHOLE);
3554       }
3555    }
3556 
3557    /*
3558     * Compile everything
3559     */
3560 
3561    gallivm_compile_module(variant->gallivm);
3562 
3563    variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);
3564 
3565    if (variant->function[RAST_EDGE_TEST]) {
3566       variant->jit_function[RAST_EDGE_TEST] = (lp_jit_frag_func)
3567             gallivm_jit_function(variant->gallivm,
3568                                  variant->function[RAST_EDGE_TEST]);
3569    }
3570 
3571    if (variant->function[RAST_WHOLE]) {
      variant->jit_function[RAST_WHOLE] = (lp_jit_frag_func)
            gallivm_jit_function(variant->gallivm,
                                 variant->function[RAST_WHOLE]);
3575    } else if (!variant->jit_function[RAST_WHOLE]) {
3576       variant->jit_function[RAST_WHOLE] = variant->jit_function[RAST_EDGE_TEST];
3577    }
3578 
3579    if (needs_caching) {
3580       lp_disk_cache_insert_shader(screen, &cached, ir_sha1_cache_key);
3581    }
3582 
3583    gallivm_free_ir(variant->gallivm);
3584 
3585    return variant;
3586 }
3587 
3588 
3589 static void *
llvmpipe_create_fs_state(struct pipe_context *pipe,
3591                          const struct pipe_shader_state *templ)
3592 {
3593    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3594    struct lp_fragment_shader *shader;
3595    int nr_samplers;
3596    int nr_sampler_views;
3597    int nr_images;
3598    int i;
3599 
3600    shader = CALLOC_STRUCT(lp_fragment_shader);
3601    if (!shader)
3602       return NULL;
3603 
3604    pipe_reference_init(&shader->reference, 1);
3605    shader->no = fs_no++;
3606    make_empty_list(&shader->variants);
3607 
3608    shader->base.type = templ->type;
3609    if (templ->type == PIPE_SHADER_IR_TGSI) {
3610       /* get/save the summary info for this shader */
3611       lp_build_tgsi_info(templ->tokens, &shader->info);
3612 
3613       /* we need to keep a local copy of the tokens */
3614       shader->base.tokens = tgsi_dup_tokens(templ->tokens);
3615    } else {
3616       shader->base.ir.nir = templ->ir.nir;
3617       nir_tgsi_scan_shader(templ->ir.nir, &shader->info.base, true);
3618    }
3619 
3620    shader->draw_data = draw_create_fragment_shader(llvmpipe->draw, templ);
3621    if (shader->draw_data == NULL) {
3622       FREE((void *) shader->base.tokens);
3623       FREE(shader);
3624       return NULL;
3625    }
3626 
3627    nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
3628    nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
3629    nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
3630    shader->variant_key_size = lp_fs_variant_key_size(MAX2(nr_samplers, nr_sampler_views), nr_images);
3631 
3632    for (i = 0; i < shader->info.base.num_inputs; i++) {
3633       shader->inputs[i].usage_mask = shader->info.base.input_usage_mask[i];
3634       shader->inputs[i].cyl_wrap = shader->info.base.input_cylindrical_wrap[i];
3635       shader->inputs[i].location = shader->info.base.input_interpolate_loc[i];
3636 
3637       switch (shader->info.base.input_interpolate[i]) {
3638       case TGSI_INTERPOLATE_CONSTANT:
3639          shader->inputs[i].interp = LP_INTERP_CONSTANT;
3640          break;
3641       case TGSI_INTERPOLATE_LINEAR:
3642          shader->inputs[i].interp = LP_INTERP_LINEAR;
3643          break;
3644       case TGSI_INTERPOLATE_PERSPECTIVE:
3645          shader->inputs[i].interp = LP_INTERP_PERSPECTIVE;
3646          break;
3647       case TGSI_INTERPOLATE_COLOR:
3648          shader->inputs[i].interp = LP_INTERP_COLOR;
3649          break;
3650       default:
3651          assert(0);
3652          break;
3653       }
3654 
3655       switch (shader->info.base.input_semantic_name[i]) {
3656       case TGSI_SEMANTIC_FACE:
3657          shader->inputs[i].interp = LP_INTERP_FACING;
3658          break;
3659       case TGSI_SEMANTIC_POSITION:
3660          /* Position was already emitted above
3661           */
3662          shader->inputs[i].interp = LP_INTERP_POSITION;
3663          shader->inputs[i].src_index = 0;
3664          continue;
3665       }
3666 
3667       /* XXX this is a completely pointless index map... */
3668       shader->inputs[i].src_index = i+1;
3669    }
3670 
3671    if (LP_DEBUG & DEBUG_TGSI) {
3672       unsigned attrib;
3673       debug_printf("llvmpipe: Create fragment shader #%u %p:\n",
3674                    shader->no, (void *) shader);
3675       tgsi_dump(templ->tokens, 0);
3676       debug_printf("usage masks:\n");
3677       for (attrib = 0; attrib < shader->info.base.num_inputs; ++attrib) {
3678          unsigned usage_mask = shader->info.base.input_usage_mask[attrib];
3679          debug_printf("  IN[%u].%s%s%s%s\n",
3680                       attrib,
3681                       usage_mask & TGSI_WRITEMASK_X ? "x" : "",
3682                       usage_mask & TGSI_WRITEMASK_Y ? "y" : "",
3683                       usage_mask & TGSI_WRITEMASK_Z ? "z" : "",
3684                       usage_mask & TGSI_WRITEMASK_W ? "w" : "");
3685       }
3686       debug_printf("\n");
3687    }
3688 
3689    return shader;
3690 }
3691 
3692 
3693 static void
llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
3695 {
3696    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3697    struct lp_fragment_shader *lp_fs = (struct lp_fragment_shader *)fs;
3698    if (llvmpipe->fs == lp_fs)
3699       return;
3700 
3701    draw_bind_fragment_shader(llvmpipe->draw,
3702                              (lp_fs ? lp_fs->draw_data : NULL));
3703 
3704    lp_fs_reference(llvmpipe, &llvmpipe->fs, lp_fs);
3705 
3706    /* invalidate the setup link, NEW_FS will make it update */
3707    lp_setup_set_fs_variant(llvmpipe->setup, NULL);
3708    llvmpipe->dirty |= LP_NEW_FS;
3709 }
3710 
3711 
3712 /**
3713  * Remove shader variant from two lists: the shader's variant list
3714  * and the context's variant list.
3715  */
3716 
3717 static
void llvmpipe_remove_shader_variant(struct llvmpipe_context *lp,
3719                                     struct lp_fragment_shader_variant *variant)
3720 {
3721    if ((LP_DEBUG & DEBUG_FS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
3722       debug_printf("llvmpipe: del fs #%u var %u v created %u v cached %u "
3723                    "v total cached %u inst %u total inst %u\n",
3724                    variant->shader->no, variant->no,
3725                    variant->shader->variants_created,
3726                    variant->shader->variants_cached,
3727                    lp->nr_fs_variants, variant->nr_instrs, lp->nr_fs_instrs);
3728    }
3729 
3730    /* remove from shader's list */
3731    remove_from_list(&variant->list_item_local);
3732    variant->shader->variants_cached--;
3733 
3734    /* remove from context's list */
3735    remove_from_list(&variant->list_item_global);
3736    lp->nr_fs_variants--;
3737    lp->nr_fs_instrs -= variant->nr_instrs;
3738 }
3739 
3740 void
llvmpipe_destroy_shader_variant(struct llvmpipe_context *lp,
                                struct lp_fragment_shader_variant *variant)
3743 {
3744    gallivm_destroy(variant->gallivm);
3745 
3746    lp_fs_reference(lp, &variant->shader, NULL);
3747 
3748    FREE(variant);
3749 }
3750 
3751 void
llvmpipe_destroy_fs(struct llvmpipe_context *llvmpipe,
3753                     struct lp_fragment_shader *shader)
3754 {
3755    /* Delete draw module's data */
3756    draw_delete_fragment_shader(llvmpipe->draw, shader->draw_data);
3757 
3758    if (shader->base.ir.nir)
3759       ralloc_free(shader->base.ir.nir);
3760    assert(shader->variants_cached == 0);
3761    FREE((void *) shader->base.tokens);
3762    FREE(shader);
3763 }
3764 
3765 static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
3767 {
3768    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3769    struct lp_fragment_shader *shader = fs;
3770    struct lp_fs_variant_list_item *li;
3771 
3772    /* Delete all the variants */
3773    li = first_elem(&shader->variants);
3774    while(!at_end(&shader->variants, li)) {
3775       struct lp_fs_variant_list_item *next = next_elem(li);
3776       struct lp_fragment_shader_variant *variant;
3777       variant = li->base;
3778       llvmpipe_remove_shader_variant(llvmpipe, li->base);
3779       lp_fs_variant_reference(llvmpipe, &variant, NULL);
3780       li = next;
3781    }
3782 
3783    lp_fs_reference(llvmpipe, &shader, NULL);
3784 }
3785 
3786 static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
3788                              enum pipe_shader_type shader, uint index,
3789                              const struct pipe_constant_buffer *cb)
3790 {
3791    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3792    struct pipe_resource *constants = cb ? cb->buffer : NULL;
3793 
3794    assert(shader < PIPE_SHADER_TYPES);
3795    assert(index < ARRAY_SIZE(llvmpipe->constants[shader]));
3796 
3797    /* note: reference counting */
3798    util_copy_constant_buffer(&llvmpipe->constants[shader][index], cb);
3799 
3800    if (constants) {
      if (!(constants->bind & PIPE_BIND_CONSTANT_BUFFER)) {
         debug_printf("Illegal set constant without bind flag\n");
         constants->bind |= PIPE_BIND_CONSTANT_BUFFER;
3804       }
3805    }
3806 
3807    if (shader == PIPE_SHADER_VERTEX ||
3808        shader == PIPE_SHADER_GEOMETRY ||
3809        shader == PIPE_SHADER_TESS_CTRL ||
3810        shader == PIPE_SHADER_TESS_EVAL) {
3811       /* Pass the constants to the 'draw' module */
3812       const unsigned size = cb ? cb->buffer_size : 0;
3813       const ubyte *data;
3814 
3815       if (constants) {
3816          data = (ubyte *) llvmpipe_resource_data(constants);
3817       }
3818       else if (cb && cb->user_buffer) {
3819          data = (ubyte *) cb->user_buffer;
3820       }
3821       else {
3822          data = NULL;
3823       }
3824 
3825       if (data)
3826          data += cb->buffer_offset;
3827 
3828       draw_set_mapped_constant_buffer(llvmpipe->draw, shader,
3829                                       index, data, size);
3830    }
3831    else if (shader == PIPE_SHADER_COMPUTE)
3832       llvmpipe->cs_dirty |= LP_CSNEW_CONSTANTS;
3833    else
3834       llvmpipe->dirty |= LP_NEW_FS_CONSTANTS;
3835 
3836    if (cb && cb->user_buffer) {
3837       pipe_resource_reference(&constants, NULL);
3838    }
3839 }
3840 
3841 static void
llvmpipe_set_shader_buffers(struct pipe_context *pipe,
3843                             enum pipe_shader_type shader, unsigned start_slot,
3844                             unsigned count, const struct pipe_shader_buffer *buffers,
3845                             unsigned writable_bitmask)
3846 {
3847    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
3848    unsigned i, idx;
3849    for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
3850       const struct pipe_shader_buffer *buffer = buffers ? &buffers[idx] : NULL;
3851 
3852       util_copy_shader_buffer(&llvmpipe->ssbos[shader][i], buffer);
3853 
3854       if (shader == PIPE_SHADER_VERTEX ||
3855           shader == PIPE_SHADER_GEOMETRY ||
3856           shader == PIPE_SHADER_TESS_CTRL ||
3857           shader == PIPE_SHADER_TESS_EVAL) {
3858          const unsigned size = buffer ? buffer->buffer_size : 0;
3859          const ubyte *data = NULL;
3860          if (buffer && buffer->buffer)
3861             data = (ubyte *) llvmpipe_resource_data(buffer->buffer);
3862          if (data)
3863             data += buffer->buffer_offset;
3864          draw_set_mapped_shader_buffer(llvmpipe->draw, shader,
3865                                        i, data, size);
3866       } else if (shader == PIPE_SHADER_COMPUTE) {
3867 	 llvmpipe->cs_dirty |= LP_CSNEW_SSBOS;
3868       } else if (shader == PIPE_SHADER_FRAGMENT) {
3869          llvmpipe->dirty |= LP_NEW_FS_SSBOS;
3870       }
3871    }
3872 }

static void
llvmpipe_set_shader_images(struct pipe_context *pipe,
                           enum pipe_shader_type shader, unsigned start_slot,
                           unsigned count, const struct pipe_image_view *images)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   unsigned i, idx;

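   /*
    * Presumably this flushes any queued draw-module work that may still
    * reference the previously bound images before we overwrite them.
    */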
   draw_flush(llvmpipe->draw);
   for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
      const struct pipe_image_view *image = images ? &images[idx] : NULL;

      util_copy_image_view(&llvmpipe->images[shader][i], image);
   }

   llvmpipe->num_images[shader] = start_slot + count;
   if (shader == PIPE_SHADER_VERTEX ||
       shader == PIPE_SHADER_GEOMETRY ||
       shader == PIPE_SHADER_TESS_CTRL ||
       shader == PIPE_SHADER_TESS_EVAL) {
      draw_set_images(llvmpipe->draw,
                      shader,
                      llvmpipe->images[shader],
                      start_slot + count);
   } else if (shader == PIPE_SHADER_COMPUTE)
      llvmpipe->cs_dirty |= LP_CSNEW_IMAGES;
   else
      llvmpipe->dirty |= LP_NEW_FS_IMAGES;
}

/**
 * Return the blend factor equivalent to a destination alpha of one.
 */
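/*
 * Illustrative reasoning: when the dst format has no alpha channel, dst
 * alpha effectively reads back as 1.0.  DST_ALPHA therefore collapses to
 * ONE and INV_DST_ALPHA to ZERO.  SRC_ALPHA_SATURATE is
 * min(src_alpha, 1 - dst_alpha) = min(src_alpha, 0), which is ZERO
 * whenever values clamp at zero (i.e. neither float nor snorm formats).
 */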
static inline unsigned
force_dst_alpha_one(unsigned factor, boolean clamped_zero)
{
   switch (factor) {
   case PIPE_BLENDFACTOR_DST_ALPHA:
      return PIPE_BLENDFACTOR_ONE;
   case PIPE_BLENDFACTOR_INV_DST_ALPHA:
      return PIPE_BLENDFACTOR_ZERO;
   case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
      if (clamped_zero)
         return PIPE_BLENDFACTOR_ZERO;
      else
         return PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE;
   }

   return factor;
}


/**
 * We need to generate several variants of the fragment pipeline to match
 * all the combinations of the contributing state atoms.
 *
 * TODO: there is actually no reason to tie this to context state -- the
 * generated code could be cached globally in the screen.
 */
static struct lp_fragment_shader_variant_key *
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 char *store)
{
   unsigned i;
   struct lp_fragment_shader_variant_key *key;

   key = (struct lp_fragment_shader_variant_key *)store;

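   /*
    * Zero only the fixed-size head of the variable-length key (everything
    * up to and including the first sampler slot); the used sampler entries
    * are cleared and filled in further below.
    */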
   memset(key, 0, offsetof(struct lp_fragment_shader_variant_key, samplers[1]));

   if (lp->framebuffer.zsbuf) {
      enum pipe_format zsbuf_format = lp->framebuffer.zsbuf->format;
      const struct util_format_description *zsbuf_desc =
         util_format_description(zsbuf_format);

      if (lp->depth_stencil->depth.enabled &&
          util_format_has_depth(zsbuf_desc)) {
         key->zsbuf_format = zsbuf_format;
         memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
      }
      if (lp->depth_stencil->stencil[0].enabled &&
          util_format_has_stencil(zsbuf_desc)) {
         key->zsbuf_format = zsbuf_format;
         memcpy(&key->stencil, &lp->depth_stencil->stencil, sizeof key->stencil);
      }
      if (llvmpipe_resource_is_1d(lp->framebuffer.zsbuf->texture)) {
         key->resource_1d = TRUE;
      }
      key->zsbuf_nr_samples = util_res_sample_count(lp->framebuffer.zsbuf->texture);
   }

   /*
    * Propagate the depth clamp setting from the rasterizer state.
    * depth_clip == 0 implies depth clamping is enabled.
    *
    * When clip_halfz is enabled, always clamp the depth values.
    *
    * XXX: This is incorrect for GL, but correct for d3d10 (depth
    * clamp is always active in d3d10, regardless of whether depth
    * clip is enabled or not).
    * (GL instead has an always-on [0,1] clamp on fs depth output
    * to ensure the depth values stay in range.  Doesn't look like
    * we do that, though...)
    */
   if (lp->rasterizer->clip_halfz) {
      key->depth_clamp = 1;
   } else {
      key->depth_clamp = (lp->rasterizer->depth_clip_near == 0) ? 1 : 0;
   }
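   /*
    * In effect (illustrative):
    *   clip_halfz == 1                        -> depth_clamp = 1
    *   clip_halfz == 0, depth_clip_near == 0  -> depth_clamp = 1
    *   clip_halfz == 0, depth_clip_near == 1  -> depth_clamp = 0
    */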

   /* alpha test only applies if render buffer 0 is non-integer (or does not exist) */
   if (!lp->framebuffer.nr_cbufs ||
       !lp->framebuffer.cbufs[0] ||
       !util_format_is_pure_integer(lp->framebuffer.cbufs[0]->format)) {
      key->alpha.enabled = lp->depth_stencil->alpha.enabled;
   }
   if (key->alpha.enabled)
      key->alpha.func = lp->depth_stencil->alpha.func;
   /* alpha.ref_value is passed in jit_context */

   key->flatshade = lp->rasterizer->flatshade;
   key->multisample = lp->rasterizer->multisample;
   key->no_ms_sample_mask_out = lp->rasterizer->no_ms_sample_mask_out;
   if (lp->active_occlusion_queries && !lp->queries_disabled) {
      key->occlusion_count = TRUE;
   }

   memcpy(&key->blend, lp->blend, sizeof key->blend);

   key->coverage_samples = 1;
   key->min_samples = 1;
   if (key->multisample) {
      key->coverage_samples = util_framebuffer_get_num_samples(&lp->framebuffer);
      key->min_samples = lp->min_samples == 1 ? 1 : key->coverage_samples;
   }
   key->nr_cbufs = lp->framebuffer.nr_cbufs;

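   /*
    * The per-rt fixups below can make rt[i] diverge even when the app
    * specified shared blend state: e.g. (illustrative) with cbuf 0 = RGBA8
    * and cbuf 1 = RGBX8, the dst-alpha fixup rewrites the blend factors
    * for cbuf 1 only.  Hence flatten shared state into per-rt state here.
    */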
   if (!key->blend.independent_blend_enable) {
      /* we always need independent blend, otherwise the fixups below
       * won't work */
      for (i = 1; i < key->nr_cbufs; i++) {
         memcpy(&key->blend.rt[i], &key->blend.rt[0], sizeof(key->blend.rt[0]));
      }
      key->blend.independent_blend_enable = 1;
   }

   for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
      struct pipe_rt_blend_state *blend_rt = &key->blend.rt[i];

      if (lp->framebuffer.cbufs[i]) {
         enum pipe_format format = lp->framebuffer.cbufs[i]->format;
         const struct util_format_description *format_desc;

         key->cbuf_format[i] = format;
         key->cbuf_nr_samples[i] = util_res_sample_count(lp->framebuffer.cbufs[i]->texture);

         /*
          * Figure out if this is a 1d resource. Note that OpenGL allows crazy
          * mixing of 2d textures with height 1 and 1d textures, so make sure
          * we pick 1d if any cbuf or zsbuf is 1d.
          */
         if (llvmpipe_resource_is_1d(lp->framebuffer.cbufs[i]->texture)) {
            key->resource_1d = TRUE;
         }

         format_desc = util_format_description(format);
         assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
                format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);

         /*
          * Mask out color channels not present in the color buffer.
          */
         blend_rt->colormask &= util_format_colormask(format_desc);

         /*
          * Disable blend for integer formats.
          */
         if (util_format_is_pure_integer(format)) {
            blend_rt->blend_enable = 0;
         }

         /*
          * Our swizzled render tiles always have an alpha channel, but the
          * linear render target format often does not, so force the dst
          * alpha to one here.
          *
          * This is not a mere optimization. Wrong results will be produced
          * if the dst alpha is used, the dst format does not have alpha,
          * and the previous rendering was not flushed from the swizzled to
          * the linear buffer. For example, NonPowTwo DCT.
          *
          * TODO: This should be generalized to all channels for better
          * performance, but only alpha causes correctness issues.
          *
          * Also, force the rgb/alpha func/factors to match, to make AoS
          * blending easier.
          */
         if (format_desc->swizzle[3] > PIPE_SWIZZLE_W ||
             format_desc->swizzle[3] == format_desc->swizzle[0]) {
            /* Doesn't cover mixed snorm/unorm but can't render to them anyway */
            boolean clamped_zero = !util_format_is_float(format) &&
                                   !util_format_is_snorm(format);
            blend_rt->rgb_src_factor =
               force_dst_alpha_one(blend_rt->rgb_src_factor, clamped_zero);
            blend_rt->rgb_dst_factor =
               force_dst_alpha_one(blend_rt->rgb_dst_factor, clamped_zero);
            blend_rt->alpha_func       = blend_rt->rgb_func;
            blend_rt->alpha_src_factor = blend_rt->rgb_src_factor;
            blend_rt->alpha_dst_factor = blend_rt->rgb_dst_factor;
         }
      }
      else {
         /* no color buffer for this fragment output */
         key->cbuf_format[i] = PIPE_FORMAT_NONE;
         key->cbuf_nr_samples[i] = 0;
         blend_rt->colormask = 0x0;
         blend_rt->blend_enable = 0;
      }
   }

   /* This value will be the same for all the variants of a given shader:
    */
   key->nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;

   struct lp_sampler_static_state *fs_sampler;

   fs_sampler = key->samplers;

   memset(fs_sampler, 0, MAX2(key->nr_samplers, key->nr_sampler_views) * sizeof *fs_sampler);

   for (i = 0; i < key->nr_samplers; ++i) {
      if (shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
         lp_sampler_static_sampler_state(&fs_sampler[i].sampler_state,
                                         lp->samplers[PIPE_SHADER_FRAGMENT][i]);
      }
   }

   /*
    * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
    * are dx10-style? Can't really have mixed opcodes, at least not
    * if we want to skip the holes here (without rescanning tgsi).
    */
   if (shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] != -1) {
      key->nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
      for (i = 0; i < key->nr_sampler_views; ++i) {
         /*
          * Note sview may exceed what's representable by file_mask.
          * This will still work, the only downside is that not actually
          * used views may be included in the shader key.
          */
         if (shader->info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1u << (i & 31))) {
            lp_sampler_static_texture_state(&fs_sampler[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_FRAGMENT][i]);
         }
      }
   }
   else {
      key->nr_sampler_views = key->nr_samplers;
      for (i = 0; i < key->nr_sampler_views; ++i) {
         if (shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
            lp_sampler_static_texture_state(&fs_sampler[i].texture_state,
                                            lp->sampler_views[PIPE_SHADER_FRAGMENT][i]);
         }
      }
   }

   struct lp_image_static_state *lp_image;
   lp_image = lp_fs_variant_key_images(key);
   key->nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
   for (i = 0; i < key->nr_images; ++i) {
      if (shader->info.base.file_mask[TGSI_FILE_IMAGE] & (1 << i)) {
         lp_sampler_static_texture_state_image(&lp_image[i].image_state,
                                               &lp->images[PIPE_SHADER_FRAGMENT][i]);
      }
   }
   return key;
}


/**
 * Update fragment shader state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key *key;
   struct lp_fragment_shader_variant *variant = NULL;
   struct lp_fs_variant_list_item *li;
   char store[LP_FS_MAX_VARIANT_KEY_SIZE];

   key = make_variant_key(lp, shader, store);

   /* Search the variants for one which matches the key */
   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      /* Move this variant to the head of the list to implement LRU
       * deletion of shaders when we have too many.
       */
      move_to_head(&lp->fs_variants_list, &variant->list_item_global);
   }
   else {
      /* variant not found, create it now */
      int64_t t0, t1, dt;
      unsigned i;
      unsigned variants_to_cull;

      if (LP_DEBUG & DEBUG_FS) {
         debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
                      lp->nr_fs_variants,
                      lp->nr_fs_instrs,
                      lp->nr_fs_variants ? lp->nr_fs_instrs / lp->nr_fs_variants : 0);
      }

      /* First, check if we've exceeded the max number of shader variants.
       * If so, free 6.25% of them (the least recently used ones).
       */
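      /*
       * E.g. (illustrative numbers): with LP_MAX_SHADER_VARIANTS == 1024,
       * hitting the cap culls 1024/16 == 64 least-recently-used variants.
       */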
      variants_to_cull = lp->nr_fs_variants >= LP_MAX_SHADER_VARIANTS ? LP_MAX_SHADER_VARIANTS / 16 : 0;

      if (variants_to_cull ||
          lp->nr_fs_instrs >= LP_MAX_SHADER_INSTRUCTIONS) {
         if (gallivm_debug & GALLIVM_DEBUG_PERF) {
            debug_printf("Evicting FS: %u fs variants,\t%u total variants,"
                         "\t%u instrs,\t%u instrs/variant\n",
                         shader->variants_cached,
                         lp->nr_fs_variants, lp->nr_fs_instrs,
                         lp->nr_fs_instrs / lp->nr_fs_variants);
         }

         /*
          * We need to re-check lp->nr_fs_variants because an arbitrarily
          * large number of shader variants (potentially all of them) could
          * be pending for destruction on flush.
          */

         for (i = 0; i < variants_to_cull || lp->nr_fs_instrs >= LP_MAX_SHADER_INSTRUCTIONS; i++) {
            struct lp_fs_variant_list_item *item;
            if (is_empty_list(&lp->fs_variants_list)) {
               break;
            }
            item = last_elem(&lp->fs_variants_list);
            assert(item);
            assert(item->base);
            llvmpipe_remove_shader_variant(lp, item->base);
            lp_fs_variant_reference(lp, &item->base, NULL);
         }
      }

      /*
       * Generate the new variant.
       */
      t0 = os_time_get();
      variant = generate_variant(lp, shader, key);
      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */

      /* Put the new variant into the list */
      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->fs_variants_list, &variant->list_item_global);
         lp->nr_fs_variants++;
         lp->nr_fs_instrs += variant->nr_instrs;
         shader->variants_cached++;
      }
   }

   /* Bind this variant */
   lp_setup_set_fs_variant(lp->setup, variant);
}


void
llvmpipe_init_fs_funcs(struct llvmpipe_context *llvmpipe)
{
   llvmpipe->pipe.create_fs_state = llvmpipe_create_fs_state;
   llvmpipe->pipe.bind_fs_state   = llvmpipe_bind_fs_state;
   llvmpipe->pipe.delete_fs_state = llvmpipe_delete_fs_state;

   llvmpipe->pipe.set_constant_buffer = llvmpipe_set_constant_buffer;

   llvmpipe->pipe.set_shader_buffers = llvmpipe_set_shader_buffers;
   llvmpipe->pipe.set_shader_images = llvmpipe_set_shader_images;
}