1 /**************************************************************************
2  *
3  * Copyright 2009-2010 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 /**
29  * @file
30  * Depth/stencil testing to LLVM IR translation.
31  *
32  * To be done accurately/efficiently the depth/stencil test must be done with
33  * the same type/format as the depth/stencil buffer, which implies massaging
34  * the incoming depths to fit into place. Using a more straightforward
35  * type/format for depth/stencil values internally and only converting when
36  * flushing would avoid this, but it would most likely result in depth-fighting
37  * artifacts.
38  *
39  * Since we're using linear layout for everything, but we need to deal with
40  * 2x2 quads, we need to load/store multiple values and swizzle them into
41  * place (we could avoid this by doing depth/stencil testing in linear format,
42  * which would be easy for late depth/stencil test as we could do that after
43  * the fragment shader loop just as we do for color buffers, but more tricky
44  * for early depth test as we'd need both masks and interpolated depth in
45  * linear format).
46  *
47  *
48  * @author Jose Fonseca <jfonseca@vmware.com>
49  * @author Brian Paul <brianp@vmware.com>
50  */
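/*
 * Concretely (see lp_build_depth_stencil_load_swizzled() below): for a 4-wide
 * fragment vector the four lanes cover one 2x2 quad, whose two rows live
 * depth_stride bytes apart in the linear buffer, so we load/store two
 * 2-element row halves and shuffle them together; for an 8-wide vector we
 * handle two such quads at once, loading 2x4 values and swizzling them into
 * 0,1,4,5,2,3,6,7 order.
 */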
51 
52 #include "pipe/p_state.h"
53 #include "util/format/u_format.h"
54 #include "util/u_cpu_detect.h"
55 
56 #include "gallivm/lp_bld_type.h"
57 #include "gallivm/lp_bld_arit.h"
58 #include "gallivm/lp_bld_bitarit.h"
59 #include "gallivm/lp_bld_const.h"
60 #include "gallivm/lp_bld_conv.h"
61 #include "gallivm/lp_bld_logic.h"
62 #include "gallivm/lp_bld_flow.h"
63 #include "gallivm/lp_bld_intr.h"
64 #include "gallivm/lp_bld_debug.h"
65 #include "gallivm/lp_bld_swizzle.h"
66 #include "gallivm/lp_bld_pack.h"
67 
68 #include "lp_bld_depth.h"
69 
70 
71 /** Used to select fields from pipe_stencil_state */
72 enum stencil_op {
73    S_FAIL_OP,
74    Z_FAIL_OP,
75    Z_PASS_OP
76 };
77 
78 
79 
80 /**
81  * Do the stencil test comparison (compare FB stencil values against ref value).
82  * This will be used twice when generating two-sided stencil code.
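 * The comparison performed is (stencilRef & valuemask) stencil->func
 * (stencilVals & valuemask), i.e. the reference value is the left-hand
 * operand of the comparison, as in the GL stencil test.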
83  * \param stencil  the front/back stencil state
84  * \param stencilRef  the stencil reference value, replicated as a vector
85  * \param stencilVals  vector of stencil values from framebuffer
86  * \return vector mask of pass/fail values (~0 or 0)
87  */
88 static LLVMValueRef
89 lp_build_stencil_test_single(struct lp_build_context *bld,
90                              const struct pipe_stencil_state *stencil,
91                              LLVMValueRef stencilRef,
92                              LLVMValueRef stencilVals)
93 {
94    LLVMBuilderRef builder = bld->gallivm->builder;
95    const unsigned stencilMax = 255; /* XXX fix */
96    struct lp_type type = bld->type;
97    LLVMValueRef res;
98 
99    /*
100     * SSE2 has intrinsics for signed comparisons, but not unsigned ones. Values
101     * are in the 0..255 range, so for elements wider than 8 bits we use a signed
102     * type to ensure the fastest comparisons are generated.
103     */
104    if (type.width <= 8) {
105       assert(!type.sign);
106    } else {
107       assert(type.sign);
108    }
109 
110    assert(stencil->enabled);
111 
112    if (stencil->valuemask != stencilMax) {
113       /* compute stencilRef = stencilRef & valuemask */
114       LLVMValueRef valuemask = lp_build_const_int_vec(bld->gallivm, type, stencil->valuemask);
115       stencilRef = LLVMBuildAnd(builder, stencilRef, valuemask, "");
116       /* compute stencilVals = stencilVals & valuemask */
117       stencilVals = LLVMBuildAnd(builder, stencilVals, valuemask, "");
118    }
119 
120    res = lp_build_cmp(bld, stencil->func, stencilRef, stencilVals);
121 
122    return res;
123 }
124 
125 
126 /**
127  * Do the one or two-sided stencil test comparison.
128  * \sa lp_build_stencil_test_single
129  * \param front_facing  an integer vector mask, indicating front (~0) or back
130  *                      (0) facing polygon. If NULL, assume front-facing.
131  */
132 static LLVMValueRef
133 lp_build_stencil_test(struct lp_build_context *bld,
134                       const struct pipe_stencil_state stencil[2],
135                       LLVMValueRef stencilRefs[2],
136                       LLVMValueRef stencilVals,
137                       LLVMValueRef front_facing)
138 {
139    LLVMValueRef res;
140 
141    assert(stencil[0].enabled);
142 
143    /* do front face test */
144    res = lp_build_stencil_test_single(bld, &stencil[0],
145                                       stencilRefs[0], stencilVals);
146 
147    if (stencil[1].enabled && front_facing != NULL) {
148       /* do back face test */
149       LLVMValueRef back_res;
150 
151       back_res = lp_build_stencil_test_single(bld, &stencil[1],
152                                               stencilRefs[1], stencilVals);
153 
154       res = lp_build_select(bld, front_facing, res, back_res);
155    }
156 
157    return res;
158 }
159 
160 
161 /**
162  * Apply the stencil operator (add/sub/keep/etc) to the given vector
163  * of stencil values.
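 * For example, PIPE_STENCIL_OP_INCR saturates the incremented value at 0xff,
 * while PIPE_STENCIL_OP_INCR_WRAP wraps around by masking the result with 0xff.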
164  * \return  new stencil values vector
165  */
166 static LLVMValueRef
167 lp_build_stencil_op_single(struct lp_build_context *bld,
168                            const struct pipe_stencil_state *stencil,
169                            enum stencil_op op,
170                            LLVMValueRef stencilRef,
171                            LLVMValueRef stencilVals)
172 
173 {
174    LLVMBuilderRef builder = bld->gallivm->builder;
175    struct lp_type type = bld->type;
176    LLVMValueRef res;
177    LLVMValueRef max = lp_build_const_int_vec(bld->gallivm, type, 0xff);
178    unsigned stencil_op;
179 
180    assert(type.sign);
181 
182    switch (op) {
183    case S_FAIL_OP:
184       stencil_op = stencil->fail_op;
185       break;
186    case Z_FAIL_OP:
187       stencil_op = stencil->zfail_op;
188       break;
189    case Z_PASS_OP:
190       stencil_op = stencil->zpass_op;
191       break;
192    default:
193       assert(0 && "Invalid stencil_op mode");
194       stencil_op = PIPE_STENCIL_OP_KEEP;
195    }
196 
197    switch (stencil_op) {
198    case PIPE_STENCIL_OP_KEEP:
199       res = stencilVals;
200       /* we can return early for this case */
201       return res;
202    case PIPE_STENCIL_OP_ZERO:
203       res = bld->zero;
204       break;
205    case PIPE_STENCIL_OP_REPLACE:
206       res = stencilRef;
207       break;
208    case PIPE_STENCIL_OP_INCR:
209       res = lp_build_add(bld, stencilVals, bld->one);
210       res = lp_build_min(bld, res, max);
211       break;
212    case PIPE_STENCIL_OP_DECR:
213       res = lp_build_sub(bld, stencilVals, bld->one);
214       res = lp_build_max(bld, res, bld->zero);
215       break;
216    case PIPE_STENCIL_OP_INCR_WRAP:
217       res = lp_build_add(bld, stencilVals, bld->one);
218       res = LLVMBuildAnd(builder, res, max, "");
219       break;
220    case PIPE_STENCIL_OP_DECR_WRAP:
221       res = lp_build_sub(bld, stencilVals, bld->one);
222       res = LLVMBuildAnd(builder, res, max, "");
223       break;
224    case PIPE_STENCIL_OP_INVERT:
225       res = LLVMBuildNot(builder, stencilVals, "");
226       res = LLVMBuildAnd(builder, res, max, "");
227       break;
228    default:
229       assert(0 && "bad stencil op mode");
230       res = bld->undef;
231    }
232 
233    return res;
234 }
235 
236 
237 /**
238  * Do the one or two-sided stencil test op/update.
239  */
240 static LLVMValueRef
241 lp_build_stencil_op(struct lp_build_context *bld,
242                     const struct pipe_stencil_state stencil[2],
243                     enum stencil_op op,
244                     LLVMValueRef stencilRefs[2],
245                     LLVMValueRef stencilVals,
246                     LLVMValueRef mask,
247                     LLVMValueRef front_facing)
248 
249 {
250    LLVMBuilderRef builder = bld->gallivm->builder;
251    LLVMValueRef res;
252 
253    assert(stencil[0].enabled);
254 
255    /* do front face op */
256    res = lp_build_stencil_op_single(bld, &stencil[0], op,
257                                      stencilRefs[0], stencilVals);
258 
259    if (stencil[1].enabled && front_facing != NULL) {
260       /* do back face op */
261       LLVMValueRef back_res;
262 
263       back_res = lp_build_stencil_op_single(bld, &stencil[1], op,
264                                             stencilRefs[1], stencilVals);
265 
266       res = lp_build_select(bld, front_facing, res, back_res);
267    }
268 
269    if (stencil[0].writemask != 0xff ||
270        (stencil[1].enabled && front_facing != NULL && stencil[1].writemask != 0xff)) {
271       /* mask &= stencil[0].writemask */
272       LLVMValueRef writemask = lp_build_const_int_vec(bld->gallivm, bld->type,
273                                                       stencil[0].writemask);
274       if (stencil[1].enabled && stencil[1].writemask != stencil[0].writemask && front_facing != NULL) {
275          LLVMValueRef back_writemask = lp_build_const_int_vec(bld->gallivm, bld->type,
276                                                          stencil[1].writemask);
277          writemask = lp_build_select(bld, front_facing, writemask, back_writemask);
278       }
279 
280       mask = LLVMBuildAnd(builder, mask, writemask, "");
281       /* res = (res & mask) | (stencilVals & ~mask) */
282       res = lp_build_select_bitwise(bld, mask, res, stencilVals);
283    }
284    else {
285       /* res = mask ? res : stencilVals */
286       res = lp_build_select(bld, mask, res, stencilVals);
287    }
288 
289    return res;
290 }
291 
292 
293 
294 /**
295  * Return a type that matches the depth/stencil format.
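 * For example, PIPE_FORMAT_Z32_FLOAT yields a 32-bit float type, while a
 * packed format whose unorm Z channel is narrower than the block (e.g. 24-bit
 * Z plus 8 bits of stencil) yields a 32-bit signed integer type, since signed
 * comparisons are better supported on SSE.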
296  */
297 struct lp_type
298 lp_depth_type(const struct util_format_description *format_desc,
299               unsigned length)
300 {
301    struct lp_type type;
302    unsigned z_swizzle;
303 
304    assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
305    assert(format_desc->block.width == 1);
306    assert(format_desc->block.height == 1);
307 
308    memset(&type, 0, sizeof type);
309    type.width = format_desc->block.bits;
310 
311    z_swizzle = format_desc->swizzle[0];
312    if (z_swizzle < 4) {
313       if (format_desc->channel[z_swizzle].type == UTIL_FORMAT_TYPE_FLOAT) {
314          type.floating = TRUE;
315          assert(z_swizzle == 0);
316          assert(format_desc->channel[z_swizzle].size == 32);
317       }
318       else if(format_desc->channel[z_swizzle].type == UTIL_FORMAT_TYPE_UNSIGNED) {
319          assert(format_desc->block.bits <= 32);
320          assert(format_desc->channel[z_swizzle].normalized);
321          if (format_desc->channel[z_swizzle].size < format_desc->block.bits) {
322             /* Prefer signed integers when possible, as SSE has less support
323              * for unsigned comparison.
324              */
325             type.sign = TRUE;
326          }
327       }
328       else
329          assert(0);
330    }
331 
332    type.length = length;
333 
334    return type;
335 }
336 
337 
338 /**
339  * Compute bitmask and bit shift to apply to the incoming fragment Z values
340  * and the Z buffer values needed before doing the Z comparison.
341  *
342  * Note that we leave the Z bits in the position that we find them
343  * in the Z buffer (typically 0xffffff00 or 0x00ffffff).  That lets us
344  * get by with fewer bit twiddling steps.
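 *
 * For example, for a packed 32-bit layout with Z in the low 24 bits this
 * computes *shift = 0, *width = 24, *mask = 0x00ffffff; with Z in the top
 * 24 bits it computes *shift = 8, *width = 24, *mask = 0xffffff00.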
345  */
346 static boolean
347 get_z_shift_and_mask(const struct util_format_description *format_desc,
348                      unsigned *shift, unsigned *width, unsigned *mask)
349 {
350    unsigned total_bits;
351    unsigned z_swizzle;
352 
353    assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
354    assert(format_desc->block.width == 1);
355    assert(format_desc->block.height == 1);
356 
357    /* The 64-bit d/s format is special: its 32-bit Z value has already been extracted. */
358    total_bits = format_desc->block.bits > 32 ? 32 : format_desc->block.bits;
359 
360    z_swizzle = format_desc->swizzle[0];
361 
362    if (z_swizzle == PIPE_SWIZZLE_NONE)
363       return FALSE;
364 
365    *width = format_desc->channel[z_swizzle].size;
366    /* & 31 is for the same reason as the 32-bit limit above */
367    *shift = format_desc->channel[z_swizzle].shift & 31;
368 
369    if (*width == total_bits) {
370       *mask = 0xffffffff;
371    } else {
372       *mask = ((1 << *width) - 1) << *shift;
373    }
374 
375    return TRUE;
376 }
377 
378 
379 /**
380  * Compute bitmask and bit shift to apply to the framebuffer pixel values
381  * to put the stencil bits in the least significant position.
382  * (i.e. 0x000000ff)
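 * For example, for stencil stored in the top byte of a 32-bit word this
 * computes *shift = 24 and *mask = 0xff; for an 8-bit stencil-only format
 * it computes *shift = 0 and *mask = 0xff.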
383  */
384 static boolean
385 get_s_shift_and_mask(const struct util_format_description *format_desc,
386                      unsigned *shift, unsigned *mask)
387 {
388    unsigned s_swizzle;
389    unsigned sz;
390 
391    s_swizzle = format_desc->swizzle[1];
392 
393    if (s_swizzle == PIPE_SWIZZLE_NONE)
394       return FALSE;
395 
396    /* just special case 64bit d/s format */
397    if (format_desc->block.bits > 32) {
398       /* XXX big-endian? */
399       assert(format_desc->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT);
400       *shift = 0;
401       *mask = 0xff;
402       return TRUE;
403    }
404 
405    *shift = format_desc->channel[s_swizzle].shift;
406    sz = format_desc->channel[s_swizzle].size;
407    *mask = (1U << sz) - 1U;
408 
409    return TRUE;
410 }
411 
412 
413 /**
414  * Perform the occlusion test and increment the counter.
415  * Test the depth mask: add the number of channels which have a non-zero
416  * mask to the occlusion counter.  E.g. if maskvalue is {-1, -1, -1, -1},
417  * the counter is incremented by 4.
418  * TODO: could get that out of the fs loop.
419  *
420  * \param type holds the element type of the mask vector.
421  * \param maskvalue is the depth test mask.
422  * \param counter is a pointer to the uint32 counter.
423  */
424 void
425 lp_build_occlusion_count(struct gallivm_state *gallivm,
426                          struct lp_type type,
427                          LLVMValueRef maskvalue,
428                          LLVMValueRef counter)
429 {
430    LLVMBuilderRef builder = gallivm->builder;
431    LLVMContextRef context = gallivm->context;
432    LLVMValueRef countmask = lp_build_const_int_vec(gallivm, type, 1);
433    LLVMValueRef count, newcount;
434 
435    assert(type.length <= 16);
436    assert(type.floating);
437 
438    if(util_cpu_caps.has_sse && type.length == 4) {
439       const char *movmskintr = "llvm.x86.sse.movmsk.ps";
440       const char *popcntintr = "llvm.ctpop.i32";
441       LLVMValueRef bits = LLVMBuildBitCast(builder, maskvalue,
442                                            lp_build_vec_type(gallivm, type), "");
443       bits = lp_build_intrinsic_unary(builder, movmskintr,
444                                       LLVMInt32TypeInContext(context), bits);
445       count = lp_build_intrinsic_unary(builder, popcntintr,
446                                        LLVMInt32TypeInContext(context), bits);
447       count = LLVMBuildZExt(builder, count, LLVMIntTypeInContext(context, 64), "");
448    }
449    else if(util_cpu_caps.has_avx && type.length == 8) {
450       const char *movmskintr = "llvm.x86.avx.movmsk.ps.256";
451       const char *popcntintr = "llvm.ctpop.i32";
452       LLVMValueRef bits = LLVMBuildBitCast(builder, maskvalue,
453                                            lp_build_vec_type(gallivm, type), "");
454       bits = lp_build_intrinsic_unary(builder, movmskintr,
455                                       LLVMInt32TypeInContext(context), bits);
456       count = lp_build_intrinsic_unary(builder, popcntintr,
457                                        LLVMInt32TypeInContext(context), bits);
458       count = LLVMBuildZExt(builder, count, LLVMIntTypeInContext(context, 64), "");
459    }
460    else {
461       unsigned i;
462       LLVMValueRef countv = LLVMBuildAnd(builder, maskvalue, countmask, "countv");
463       LLVMTypeRef counttype = LLVMIntTypeInContext(context, type.length * 8);
464       LLVMTypeRef i8vntype = LLVMVectorType(LLVMInt8TypeInContext(context), type.length * 4);
465       LLVMValueRef shufflev, countd;
466       LLVMValueRef shuffles[16];
467       const char *popcntintr = NULL;
468 
469       countv = LLVMBuildBitCast(builder, countv, i8vntype, "");
470 
471        for (i = 0; i < type.length; i++) {
472 #if UTIL_ARCH_LITTLE_ENDIAN
473           shuffles[i] = lp_build_const_int32(gallivm, 4*i);
474 #else
475           shuffles[i] = lp_build_const_int32(gallivm, (4*i) + 3);
476 #endif
477        }
478 
479        shufflev = LLVMConstVector(shuffles, type.length);
480        countd = LLVMBuildShuffleVector(builder, countv, LLVMGetUndef(i8vntype), shufflev, "");
481        countd = LLVMBuildBitCast(builder, countd, counttype, "countd");
482 
483        /*
484         * XXX FIXME
485         * this is bad on cpus without popcount (on x86 supported by intel
486         * nehalem, amd barcelona, and up - not tied to sse42).
487         * Would be much faster to just sum the 4 elements of the vector with
488         * some horizontal add (shuffle/add/shuffle/add after the initial and).
489         */
490        switch (type.length) {
491        case 4:
492           popcntintr = "llvm.ctpop.i32";
493           break;
494        case 8:
495           popcntintr = "llvm.ctpop.i64";
496           break;
497        case 16:
498           popcntintr = "llvm.ctpop.i128";
499           break;
500        default:
501           assert(0);
502        }
503        count = lp_build_intrinsic_unary(builder, popcntintr, counttype, countd);
504 
505        if (type.length > 8) {
506           count = LLVMBuildTrunc(builder, count, LLVMIntTypeInContext(context, 64), "");
507        }
508        else if (type.length < 8) {
509           count = LLVMBuildZExt(builder, count, LLVMIntTypeInContext(context, 64), "");
510        }
511    }
512    newcount = LLVMBuildLoad(builder, counter, "origcount");
513    newcount = LLVMBuildAdd(builder, newcount, count, "newcount");
514    LLVMBuildStore(builder, newcount, counter);
515 }
516 
517 
518 /**
519  * Load depth/stencil values.
520  * The stored values are linear, swizzle them.
521  *
522  * \param type  the data type of the fragment depth/stencil values
523  * \param format_desc  description of the depth/stencil surface
524  * \param is_1d  whether this resource has only one dimension
525  * \param loop_counter  the current loop iteration
526  * \param depth_ptr  pointer to the depth/stencil values of this 4x4 block
527  * \param depth_stride  stride of the depth/stencil buffer
528  * \param z_fb  contains z values loaded from fb (may include padding)
529  * \param s_fb  contains s values loaded from fb (may include padding)
530  */
531 void
532 lp_build_depth_stencil_load_swizzled(struct gallivm_state *gallivm,
533                                      struct lp_type z_src_type,
534                                      const struct util_format_description *format_desc,
535                                      boolean is_1d,
536                                      LLVMValueRef depth_ptr,
537                                      LLVMValueRef depth_stride,
538                                      LLVMValueRef *z_fb,
539                                      LLVMValueRef *s_fb,
540                                      LLVMValueRef loop_counter)
541 {
542    LLVMBuilderRef builder = gallivm->builder;
543    LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH / 4];
544    LLVMValueRef zs_dst1, zs_dst2;
545    LLVMValueRef zs_dst_ptr;
546    LLVMValueRef depth_offset1, depth_offset2;
547    LLVMTypeRef load_ptr_type;
548    unsigned depth_bytes = format_desc->block.bits / 8;
549    struct lp_type zs_type = lp_depth_type(format_desc, z_src_type.length);
550    struct lp_type zs_load_type = zs_type;
551 
552    zs_load_type.length = zs_load_type.length / 2;
553    load_ptr_type = LLVMPointerType(lp_build_vec_type(gallivm, zs_load_type), 0);
554 
555    if (z_src_type.length == 4) {
556       unsigned i;
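      /*
       * loop_counter selects one 2x2 quad of the 4x4 block: bit 0 picks the
       * left or right quad (an offset of two pixels), bit 1 picks the upper
       * or lower pair of rows (its value of 2 times depth_stride is an
       * offset of two rows).
       */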
557       LLVMValueRef looplsb = LLVMBuildAnd(builder, loop_counter,
558                                           lp_build_const_int32(gallivm, 1), "");
559       LLVMValueRef loopmsb = LLVMBuildAnd(builder, loop_counter,
560                                           lp_build_const_int32(gallivm, 2), "");
561       LLVMValueRef offset2 = LLVMBuildMul(builder, loopmsb,
562                                           depth_stride, "");
563       depth_offset1 = LLVMBuildMul(builder, looplsb,
564                                    lp_build_const_int32(gallivm, depth_bytes * 2), "");
565       depth_offset1 = LLVMBuildAdd(builder, depth_offset1, offset2, "");
566 
567       /* just concatenate the loaded 2x2 values into a 4-wide vector */
568       for (i = 0; i < 4; i++) {
569          shuffles[i] = lp_build_const_int32(gallivm, i);
570       }
571    }
572    else {
573       unsigned i;
574       LLVMValueRef loopx2 = LLVMBuildShl(builder, loop_counter,
575                                          lp_build_const_int32(gallivm, 1), "");
576       assert(z_src_type.length == 8);
577       depth_offset1 = LLVMBuildMul(builder, loopx2, depth_stride, "");
578       /*
579        * We load 2x4 values, and need to swizzle them (order
580        * 0,1,4,5,2,3,6,7) - not so hot with avx unfortunately.
581        */
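      /*
       * Shuffle indices 0..3 select the first loaded row and 4..7 the second,
       * so this order regroups the two linear rows into two 2x2 quads (left
       * quad first, then right quad); the expression below yields exactly
       * 0,1,4,5,2,3,6,7 for i = 0..7.
       */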
582       for (i = 0; i < 8; i++) {
583          shuffles[i] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2);
584       }
585    }
586 
587    depth_offset2 = LLVMBuildAdd(builder, depth_offset1, depth_stride, "");
588 
589    /* Load current z/stencil values from z/stencil buffer */
590    zs_dst_ptr = LLVMBuildGEP(builder, depth_ptr, &depth_offset1, 1, "");
591    zs_dst_ptr = LLVMBuildBitCast(builder, zs_dst_ptr, load_ptr_type, "");
592    zs_dst1 = LLVMBuildLoad(builder, zs_dst_ptr, "");
593    if (is_1d) {
594       zs_dst2 = lp_build_undef(gallivm, zs_load_type);
595    }
596    else {
597       zs_dst_ptr = LLVMBuildGEP(builder, depth_ptr, &depth_offset2, 1, "");
598       zs_dst_ptr = LLVMBuildBitCast(builder, zs_dst_ptr, load_ptr_type, "");
599       zs_dst2 = LLVMBuildLoad(builder, zs_dst_ptr, "");
600    }
601 
602    *z_fb = LLVMBuildShuffleVector(builder, zs_dst1, zs_dst2,
603                                   LLVMConstVector(shuffles, zs_type.length), "");
604    *s_fb = *z_fb;
605 
606    if (format_desc->block.bits == 8) {
607       /* Extend stencil-only 8 bit values (S8_UINT) */
608       *s_fb = LLVMBuildZExt(builder, *s_fb,
609                             lp_build_int_vec_type(gallivm, z_src_type), "");
610    }
611 
612    if (format_desc->block.bits < z_src_type.width) {
613       /* Extend destination ZS values (e.g., when reading from Z16_UNORM) */
614       *z_fb = LLVMBuildZExt(builder, *z_fb,
615                             lp_build_int_vec_type(gallivm, z_src_type), "");
616    }
617 
618    else if (format_desc->block.bits > 32) {
619       /* Rely on llvm to nicely handle the too-wide vector we have here. */
620       unsigned i;
621       struct lp_type typex2 = zs_type;
622       struct lp_type s_type = zs_type;
623       LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH / 4];
624       LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH / 4];
625       LLVMValueRef tmp;
626 
627       typex2.width = typex2.width / 2;
628       typex2.length = typex2.length * 2;
629       s_type.width = s_type.width / 2;
630       s_type.floating = 0;
631 
632       tmp = LLVMBuildBitCast(builder, *z_fb,
633                              lp_build_vec_type(gallivm, typex2), "");
634 
635       for (i = 0; i < zs_type.length; i++) {
636          shuffles1[i] = lp_build_const_int32(gallivm, i * 2);
637          shuffles2[i] = lp_build_const_int32(gallivm, i * 2 + 1);
638       }
639       *z_fb = LLVMBuildShuffleVector(builder, tmp, tmp,
640                                      LLVMConstVector(shuffles1, zs_type.length), "");
641       *s_fb = LLVMBuildShuffleVector(builder, tmp, tmp,
642                                      LLVMConstVector(shuffles2, zs_type.length), "");
643       *s_fb = LLVMBuildBitCast(builder, *s_fb,
644                                lp_build_vec_type(gallivm, s_type), "");
645       lp_build_name(*s_fb, "s_dst");
646    }
647 
648    lp_build_name(*z_fb, "z_dst");
649    lp_build_name(*s_fb, "s_dst");
651 }
652 
653 /**
654  * Store depth/stencil values.
655  * Incoming values are swizzled (typically n 2x2 quads), stored linearly.
656  * If there's a mask it will do select/store otherwise just store.
657  *
658  * \param type  the data type of the fragment depth/stencil values
659  * \param format_desc  description of the depth/stencil surface
660  * \param is_1d  whether this resource has only one dimension
661  * \param mask_value the alive/dead pixel mask for the quad (vector)
662  * \param z_fb  z values read from fb (with padding)
663  * \param s_fb  s values read from fb (with padding)
664  * \param loop_counter  the current loop iteration
665  * \param depth_ptr  pointer to the depth/stencil values of this 4x4 block
666  * \param depth_stride  stride of the depth/stencil buffer
667  * \param z_value the depth values to store (with padding)
668  * \param s_value the stencil values to store (with padding)
669  */
670 void
671 lp_build_depth_stencil_write_swizzled(struct gallivm_state *gallivm,
672                                       struct lp_type z_src_type,
673                                       const struct util_format_description *format_desc,
674                                       boolean is_1d,
675                                       LLVMValueRef mask_value,
676                                       LLVMValueRef z_fb,
677                                       LLVMValueRef s_fb,
678                                       LLVMValueRef loop_counter,
679                                       LLVMValueRef depth_ptr,
680                                       LLVMValueRef depth_stride,
681                                       LLVMValueRef z_value,
682                                       LLVMValueRef s_value)
683 {
684    struct lp_build_context z_bld;
685    LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH / 4];
686    LLVMBuilderRef builder = gallivm->builder;
687    LLVMValueRef zs_dst1, zs_dst2;
688    LLVMValueRef zs_dst_ptr1, zs_dst_ptr2;
689    LLVMValueRef depth_offset1, depth_offset2;
690    LLVMTypeRef load_ptr_type;
691    unsigned depth_bytes = format_desc->block.bits / 8;
692    struct lp_type zs_type = lp_depth_type(format_desc, z_src_type.length);
693    struct lp_type z_type = zs_type;
694    struct lp_type zs_load_type = zs_type;
695 
696    zs_load_type.length = zs_load_type.length / 2;
697    load_ptr_type = LLVMPointerType(lp_build_vec_type(gallivm, zs_load_type), 0);
698 
699    z_type.width = z_src_type.width;
700 
701    lp_build_context_init(&z_bld, gallivm, z_type);
702 
703    /*
704     * This is far from ideal, at least for late depth write we should do this
705     * outside the fs loop to avoid all the swizzle stuff.
706     */
707    if (z_src_type.length == 4) {
708       LLVMValueRef looplsb = LLVMBuildAnd(builder, loop_counter,
709                                           lp_build_const_int32(gallivm, 1), "");
710       LLVMValueRef loopmsb = LLVMBuildAnd(builder, loop_counter,
711                                           lp_build_const_int32(gallivm, 2), "");
712       LLVMValueRef offset2 = LLVMBuildMul(builder, loopmsb,
713                                           depth_stride, "");
714       depth_offset1 = LLVMBuildMul(builder, looplsb,
715                                    lp_build_const_int32(gallivm, depth_bytes * 2), "");
716       depth_offset1 = LLVMBuildAdd(builder, depth_offset1, offset2, "");
717    }
718    else {
719       unsigned i;
720       LLVMValueRef loopx2 = LLVMBuildShl(builder, loop_counter,
721                                          lp_build_const_int32(gallivm, 1), "");
722       assert(z_src_type.length == 8);
723       depth_offset1 = LLVMBuildMul(builder, loopx2, depth_stride, "");
724       /*
725        * We load 2x4 values, and need to swizzle them (order
726        * 0,1,4,5,2,3,6,7) - not so hot with avx unfortunately.
727        */
728       for (i = 0; i < 8; i++) {
729          shuffles[i] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2);
730       }
731    }
732 
733    depth_offset2 = LLVMBuildAdd(builder, depth_offset1, depth_stride, "");
734 
735    zs_dst_ptr1 = LLVMBuildGEP(builder, depth_ptr, &depth_offset1, 1, "");
736    zs_dst_ptr1 = LLVMBuildBitCast(builder, zs_dst_ptr1, load_ptr_type, "");
737    zs_dst_ptr2 = LLVMBuildGEP(builder, depth_ptr, &depth_offset2, 1, "");
738    zs_dst_ptr2 = LLVMBuildBitCast(builder, zs_dst_ptr2, load_ptr_type, "");
739 
740    if (format_desc->block.bits > 32) {
741       s_value = LLVMBuildBitCast(builder, s_value, z_bld.vec_type, "");
742    }
743 
744    if (mask_value) {
745       z_value = lp_build_select(&z_bld, mask_value, z_value, z_fb);
746       if (format_desc->block.bits > 32) {
747          s_fb = LLVMBuildBitCast(builder, s_fb, z_bld.vec_type, "");
748          s_value = lp_build_select(&z_bld, mask_value, s_value, s_fb);
749       }
750    }
751 
752    if (zs_type.width < z_src_type.width) {
753       /* Truncate ZS values (e.g., when writing to Z16_UNORM) */
754       z_value = LLVMBuildTrunc(builder, z_value,
755                                lp_build_int_vec_type(gallivm, zs_type), "");
756    }
757 
758    if (format_desc->block.bits <= 32) {
759       if (z_src_type.length == 4) {
760          zs_dst1 = lp_build_extract_range(gallivm, z_value, 0, 2);
761          zs_dst2 = lp_build_extract_range(gallivm, z_value, 2, 2);
762       }
763       else {
764          assert(z_src_type.length == 8);
765          zs_dst1 = LLVMBuildShuffleVector(builder, z_value, z_value,
766                                           LLVMConstVector(&shuffles[0],
767                                                           zs_load_type.length), "");
768          zs_dst2 = LLVMBuildShuffleVector(builder, z_value, z_value,
769                                           LLVMConstVector(&shuffles[4],
770                                                           zs_load_type.length), "");
771       }
772    }
773    else {
774       if (z_src_type.length == 4) {
775          zs_dst1 = lp_build_interleave2(gallivm, z_type,
776                                         z_value, s_value, 0);
777          zs_dst2 = lp_build_interleave2(gallivm, z_type,
778                                         z_value, s_value, 1);
779       }
780       else {
781          unsigned i;
782          LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH / 2];
783          assert(z_src_type.length == 8);
784          for (i = 0; i < 8; i++) {
785             shuffles[i*2] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2);
786             shuffles[i*2+1] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2 +
787                                                    z_src_type.length);
788          }
789          zs_dst1 = LLVMBuildShuffleVector(builder, z_value, s_value,
790                                           LLVMConstVector(&shuffles[0],
791                                                           z_src_type.length), "");
792          zs_dst2 = LLVMBuildShuffleVector(builder, z_value, s_value,
793                                           LLVMConstVector(&shuffles[8],
794                                                           z_src_type.length), "");
795       }
796       zs_dst1 = LLVMBuildBitCast(builder, zs_dst1,
797                                  lp_build_vec_type(gallivm, zs_load_type), "");
798       zs_dst2 = LLVMBuildBitCast(builder, zs_dst2,
799                                  lp_build_vec_type(gallivm, zs_load_type), "");
800    }
801 
802    LLVMBuildStore(builder, zs_dst1, zs_dst_ptr1);
803    if (!is_1d) {
804       LLVMBuildStore(builder, zs_dst2, zs_dst_ptr2);
805    }
806 }
807 
808 /**
809  * Generate code for performing depth and/or stencil tests.
810  * We operate on a vector of values (typically n 2x2 quads).
811  *
812  * \param depth  the depth test state
813  * \param stencil  the front/back stencil state
814  * \param type  the data type of the fragment depth/stencil values
815  * \param format_desc  description of the depth/stencil surface
816  * \param mask  the alive/dead pixel mask for the quad (vector)
817  * \param cov_mask coverage mask
818  * \param stencil_refs  the front/back stencil ref values (scalar)
819  * \param z_src  the incoming depth/stencil values (n 2x2 quad values, float32)
820  * \param z_fb, s_fb  the depth/stencil values currently in the framebuffer
821  * \param face  contains boolean value indicating front/back facing polygon
822  */
823 void
824 lp_build_depth_stencil_test(struct gallivm_state *gallivm,
825                             const struct pipe_depth_state *depth,
826                             const struct pipe_stencil_state stencil[2],
827                             struct lp_type z_src_type,
828                             const struct util_format_description *format_desc,
829                             struct lp_build_mask_context *mask,
830                             LLVMValueRef *cov_mask,
831                             LLVMValueRef stencil_refs[2],
832                             LLVMValueRef z_src,
833                             LLVMValueRef z_fb,
834                             LLVMValueRef s_fb,
835                             LLVMValueRef face,
836                             LLVMValueRef *z_value,
837                             LLVMValueRef *s_value,
838                             boolean do_branch)
839 {
840    LLVMBuilderRef builder = gallivm->builder;
841    struct lp_type z_type;
842    struct lp_build_context z_bld;
843    struct lp_build_context s_bld;
844    struct lp_type s_type;
845    unsigned z_shift = 0, z_width = 0, z_mask = 0;
846    LLVMValueRef z_dst = NULL;
847    LLVMValueRef stencil_vals = NULL;
848    LLVMValueRef z_bitmask = NULL, stencil_shift = NULL;
849    LLVMValueRef z_pass = NULL, s_pass_mask = NULL;
850    LLVMValueRef current_mask = mask ? lp_build_mask_value(mask) : *cov_mask;
851    LLVMValueRef front_facing = NULL;
852    boolean have_z, have_s;
853 
854    /*
855     * Depths are expected to be between 0 and 1, even if they are stored in
856     * floats. Setting these bits here will ensure that the lp_build_conv() call
857     * below won't try to unnecessarily clamp the incoming values.
858     */
859    if(z_src_type.floating) {
860       z_src_type.sign = FALSE;
861       z_src_type.norm = TRUE;
862    }
863    else {
864       assert(!z_src_type.sign);
865       assert(z_src_type.norm);
866    }
867 
868    /* Pick the type matching the depth-stencil format. */
869    z_type = lp_depth_type(format_desc, z_src_type.length);
870 
871    /* Pick the intermediate type for depth operations. */
872    z_type.width = z_src_type.width;
873    assert(z_type.length == z_src_type.length);
874 
875    /* FIXME: for non-float depth/stencil might generate better code
876     * if we'd always split it up to use 128bit operations.
877     * For stencil we'd almost certainly want to pack to 8xi16 values,
878     * for z just run twice.
879     */
880 
881    /* Sanity checking */
882    {
883       const unsigned z_swizzle = format_desc->swizzle[0];
884       const unsigned s_swizzle = format_desc->swizzle[1];
885 
886       assert(z_swizzle != PIPE_SWIZZLE_NONE ||
887              s_swizzle != PIPE_SWIZZLE_NONE);
888 
889       assert(depth->enabled || stencil[0].enabled);
890 
891       assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
892       assert(format_desc->block.width == 1);
893       assert(format_desc->block.height == 1);
894 
895       if (stencil[0].enabled) {
896          assert(s_swizzle < 4);
897          assert(format_desc->channel[s_swizzle].type == UTIL_FORMAT_TYPE_UNSIGNED);
898          assert(format_desc->channel[s_swizzle].pure_integer);
899          assert(!format_desc->channel[s_swizzle].normalized);
900          assert(format_desc->channel[s_swizzle].size == 8);
901       }
902 
903       if (depth->enabled) {
904          assert(z_swizzle < 4);
905          if (z_type.floating) {
906             assert(z_swizzle == 0);
907             assert(format_desc->channel[z_swizzle].type ==
908                    UTIL_FORMAT_TYPE_FLOAT);
909             assert(format_desc->channel[z_swizzle].size == 32);
910          }
911          else {
912             assert(format_desc->channel[z_swizzle].type ==
913                    UTIL_FORMAT_TYPE_UNSIGNED);
914             assert(format_desc->channel[z_swizzle].normalized);
915             assert(!z_type.fixed);
916          }
917       }
918    }
919 
920 
921    /* Setup build context for Z vals */
922    lp_build_context_init(&z_bld, gallivm, z_type);
923 
924    /* Setup build context for stencil vals */
925    s_type = lp_int_type(z_type);
926    lp_build_context_init(&s_bld, gallivm, s_type);
927 
928    /* Compute and apply the Z/stencil bitmasks and shifts.
929     */
930    {
931       unsigned s_shift, s_mask;
932 
933       z_dst = z_fb;
934       stencil_vals = s_fb;
935 
936       have_z = get_z_shift_and_mask(format_desc, &z_shift, &z_width, &z_mask);
937       have_s = get_s_shift_and_mask(format_desc, &s_shift, &s_mask);
938 
939       if (have_z) {
940          if (z_mask != 0xffffffff) {
941             z_bitmask = lp_build_const_int_vec(gallivm, z_type, z_mask);
942          }
943 
944          /*
945           * Align the framebuffer Z's LSB to the right.
946           */
947          if (z_shift) {
948             LLVMValueRef shift = lp_build_const_int_vec(gallivm, z_type, z_shift);
949             z_dst = LLVMBuildLShr(builder, z_dst, shift, "z_dst");
950          } else if (z_bitmask) {
951             z_dst = LLVMBuildAnd(builder, z_dst, z_bitmask, "z_dst");
952          } else {
953             lp_build_name(z_dst, "z_dst");
954          }
955       }
956 
957       if (have_s) {
958          if (s_shift) {
959             LLVMValueRef shift = lp_build_const_int_vec(gallivm, s_type, s_shift);
960             stencil_vals = LLVMBuildLShr(builder, stencil_vals, shift, "");
961             stencil_shift = shift;  /* used below */
962          }
963 
964          if (s_mask != 0xffffffff) {
965             LLVMValueRef mask = lp_build_const_int_vec(gallivm, s_type, s_mask);
966             stencil_vals = LLVMBuildAnd(builder, stencil_vals, mask, "");
967          }
968 
969          lp_build_name(stencil_vals, "s_dst");
970       }
971    }
972 
973    if (stencil[0].enabled) {
974 
975       if (face) {
976          if (0) {
977             /*
978              * XXX: the scalar expansion below produces atrocious code
979              * (basically producing a 64bit scalar value, then moving the 2
980              * 32bit pieces separately to simd, plus 4 shuffles, which is
981              * seriously lame). But the scalar-simd transitions are always
982              * tricky, so no big surprise there.
983              * This here would be way better, however llvm has some serious
984              * trouble later using it in the select, probably because it will
985              * recognize the expression as constant and move the simd value
986              * away (out of the loop) - and then it will suddenly try
987              * constructing i1 high-bit masks out of it later...
988              * (Try piglit stencil-twoside.)
989              * Note this is NOT due to using SExt/Trunc, it fails exactly the
990              * same even when using native compare/select.
991              * I cannot reproduce this problem when using stand-alone compiler
992              * though, suggesting some problem with optimization passes...
993              * (With stand-alone compilation, the construction of this mask
994              * value, no matter if the easy 3 instruction here or the complex
995              * 16+ one below, never gets separated from where it's used.)
996              * The scalar code still has the same problem, but the generated
997              * code looks a bit better at least for some reason, even if
998              * mostly by luck (the fundamental issue clearly is the same).
999              */
1000             front_facing = lp_build_broadcast(gallivm, s_bld.vec_type, face);
1001             /* front_facing = face != 0 ? ~0 : 0 */
1002             front_facing = lp_build_compare(gallivm, s_bld.type,
1003                                             PIPE_FUNC_NOTEQUAL,
1004                                             front_facing, s_bld.zero);
1005          } else {
1006             LLVMValueRef zero = lp_build_const_int32(gallivm, 0);
1007 
1008             /* front_facing = face != 0 ? ~0 : 0 */
1009             front_facing = LLVMBuildICmp(builder, LLVMIntNE, face, zero, "");
1010             front_facing = LLVMBuildSExt(builder, front_facing,
1011                                          LLVMIntTypeInContext(gallivm->context,
1012                                                 s_bld.type.length*s_bld.type.width),
1013                                          "");
1014             front_facing = LLVMBuildBitCast(builder, front_facing,
1015                                             s_bld.int_vec_type, "");
1016 
1017          }
1018       }
1019 
1020       s_pass_mask = lp_build_stencil_test(&s_bld, stencil,
1021                                           stencil_refs, stencil_vals,
1022                                           front_facing);
1023 
1024       /* apply stencil-fail operator */
1025       {
1026          LLVMValueRef s_fail_mask = lp_build_andnot(&s_bld, current_mask, s_pass_mask);
1027          stencil_vals = lp_build_stencil_op(&s_bld, stencil, S_FAIL_OP,
1028                                             stencil_refs, stencil_vals,
1029                                             s_fail_mask, front_facing);
1030       }
1031    }
1032 
1033    if (depth->enabled) {
1034       /*
1035        * Convert fragment Z to the desired type, aligning the LSB to the right.
1036        */
1037 
1038       assert(z_type.width == z_src_type.width);
1039       assert(z_type.length == z_src_type.length);
1040       assert(lp_check_value(z_src_type, z_src));
1041       if (z_src_type.floating) {
1042          /*
1043           * Convert from floating point values
1044           */
1045 
1046          if (!z_type.floating) {
1047             z_src = lp_build_clamped_float_to_unsigned_norm(gallivm,
1048                                                             z_src_type,
1049                                                             z_width,
1050                                                             z_src);
1051          }
1052       } else {
1053          /*
1054           * Convert from unsigned normalized values.
1055           */
1056 
1057          assert(!z_src_type.sign);
1058          assert(!z_src_type.fixed);
1059          assert(z_src_type.norm);
1060          assert(!z_type.floating);
1061          if (z_src_type.width > z_width) {
1062             LLVMValueRef shift = lp_build_const_int_vec(gallivm, z_src_type,
1063                                                         z_src_type.width - z_width);
1064             z_src = LLVMBuildLShr(builder, z_src, shift, "");
1065          }
1066       }
1067       assert(lp_check_value(z_type, z_src));
1068 
1069       lp_build_name(z_src, "z_src");
1070 
1071       /* compare src Z to dst Z, returning 'pass' mask */
1072       z_pass = lp_build_cmp(&z_bld, depth->func, z_src, z_dst);
1073 
1074       /* mask off bits that failed stencil test */
1075       if (s_pass_mask) {
1076          current_mask = LLVMBuildAnd(builder, current_mask, s_pass_mask, "");
1077       }
1078 
1079       if (!stencil[0].enabled && mask) {
1080          /* We can potentially skip all remaining operations here, but only
1081           * if stencil is disabled, because otherwise we'd still need to update
1082           * the stencil buffer values.  There is no need to update the Z buffer values.
1083           */
1084          lp_build_mask_update(mask, z_pass);
1085 
1086          if (do_branch) {
1087             lp_build_mask_check(mask);
1088          }
1089       }
1090 
1091       if (depth->writemask) {
1092          LLVMValueRef z_pass_mask;
1093 
1094          /* mask off bits that failed Z test */
1095          z_pass_mask = LLVMBuildAnd(builder, current_mask, z_pass, "");
1096 
1097          /* Mix the old and new Z buffer values.
1098           * z_dst[i] = zselectmask[i] ? z_src[i] : z_dst[i]
1099           */
1100          z_dst = lp_build_select(&z_bld, z_pass_mask, z_src, z_dst);
1101       }
1102 
1103       if (stencil[0].enabled) {
1104          /* update stencil buffer values according to z pass/fail result */
1105          LLVMValueRef z_fail_mask, z_pass_mask;
1106 
1107          /* apply Z-fail operator */
1108          z_fail_mask = lp_build_andnot(&s_bld, current_mask, z_pass);
1109          stencil_vals = lp_build_stencil_op(&s_bld, stencil, Z_FAIL_OP,
1110                                             stencil_refs, stencil_vals,
1111                                             z_fail_mask, front_facing);
1112 
1113          /* apply Z-pass operator */
1114          z_pass_mask = LLVMBuildAnd(builder, current_mask, z_pass, "");
1115          stencil_vals = lp_build_stencil_op(&s_bld, stencil, Z_PASS_OP,
1116                                             stencil_refs, stencil_vals,
1117                                             z_pass_mask, front_facing);
1118       }
1119    }
1120    else {
1121       /* No depth test: apply Z-pass operator to stencil buffer values which
1122        * passed the stencil test.
1123        */
1124       s_pass_mask = LLVMBuildAnd(builder, current_mask, s_pass_mask, "");
1125       stencil_vals = lp_build_stencil_op(&s_bld, stencil, Z_PASS_OP,
1126                                          stencil_refs, stencil_vals,
1127                                          s_pass_mask, front_facing);
1128    }
1129 
1130    /* Put Z and stencil bits in the right place */
1131    if (have_z && z_shift) {
1132       LLVMValueRef shift = lp_build_const_int_vec(gallivm, z_type, z_shift);
1133       z_dst = LLVMBuildShl(builder, z_dst, shift, "");
1134    }
1135    if (stencil_vals && stencil_shift)
1136       stencil_vals = LLVMBuildShl(builder, stencil_vals,
1137                                   stencil_shift, "");
1138 
1139    /* Finally, merge the z/stencil values */
1140    if (format_desc->block.bits <= 32) {
1141       if (have_z && have_s)
1142          *z_value = LLVMBuildOr(builder, z_dst, stencil_vals, "");
1143       else if (have_z)
1144          *z_value = z_dst;
1145       else
1146          *z_value = stencil_vals;
1147       *s_value = *z_value;
1148    }
1149    else {
1150       *z_value = z_dst;
1151       *s_value = stencil_vals;
1152    }
1153 
1154    if (mask) {
1155       if (s_pass_mask)
1156          lp_build_mask_update(mask, s_pass_mask);
1157 
1158       if (depth->enabled && stencil[0].enabled)
1159          lp_build_mask_update(mask, z_pass);
1160    } else {
1161       LLVMValueRef tmp_mask = *cov_mask;
1162       if (s_pass_mask)
1163          tmp_mask = LLVMBuildAnd(builder, tmp_mask, s_pass_mask, "");
1164 
1165       /* for multisample we don't do the stencil optimisation so update always */
1166       if (depth->enabled)
1167          tmp_mask = LLVMBuildAnd(builder, tmp_mask, z_pass, "");
1168       *cov_mask = tmp_mask;
1169    }
1170 }
1171 
1172