/**************************************************************************
 *
 * Copyright 2012 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_state.h"
#include "util/u_debug.h"

#include "gallivm/lp_bld_type.h"
#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_swizzle.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_debug.h"

#include "lp_bld_blend.h"

/**
 * Is (a OP b) == (b OP a)?
 */
boolean
lp_build_blend_func_commutative(unsigned func)
{
   switch (func) {
   case PIPE_BLEND_ADD:
   case PIPE_BLEND_MIN:
   case PIPE_BLEND_MAX:
      return TRUE;
   case PIPE_BLEND_SUBTRACT:
   case PIPE_BLEND_REVERSE_SUBTRACT:
      return FALSE;
   default:
      assert(0);
      return TRUE;
   }
}


/**
 * Whether the blending functions are the reverse of each other.
 */
boolean
lp_build_blend_func_reverse(unsigned rgb_func, unsigned alpha_func)
{
   if (rgb_func == alpha_func)
      return FALSE;
   if (rgb_func == PIPE_BLEND_SUBTRACT && alpha_func == PIPE_BLEND_REVERSE_SUBTRACT)
      return TRUE;
   if (rgb_func == PIPE_BLEND_REVERSE_SUBTRACT && alpha_func == PIPE_BLEND_SUBTRACT)
      return TRUE;
   return FALSE;
}


/**
 * Whether the blending factors are complementary to each other.
 */
static inline boolean
lp_build_blend_factor_complementary(unsigned src_factor, unsigned dst_factor)
{
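   /*
    * Relies on the PIPE_BLENDFACTOR_* encoding: each one-minus (inverted)
    * factor is its plain counterpart with bit 0x10 set.
    */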
   return dst_factor == (src_factor ^ 0x10);
}


/**
 * @sa http://www.opengl.org/sdk/docs/man/xhtml/glBlendEquationSeparate.xml
 */
LLVMValueRef
lp_build_blend_func(struct lp_build_context *bld,
                    unsigned func,
                    LLVMValueRef term1,
                    LLVMValueRef term2)
{
   switch (func) {
   case PIPE_BLEND_ADD:
      return lp_build_add(bld, term1, term2);
   case PIPE_BLEND_SUBTRACT:
      return lp_build_sub(bld, term1, term2);
   case PIPE_BLEND_REVERSE_SUBTRACT:
      return lp_build_sub(bld, term2, term1);
   case PIPE_BLEND_MIN:
      return lp_build_min(bld, term1, term2);
   case PIPE_BLEND_MAX:
      return lp_build_max(bld, term1, term2);
   default:
      assert(0);
      return bld->zero;
   }
}


/**
 * Performs optimisations and blending independent of SoA/AoS
 *
 * @param func                 the blend function
 * @param factor_src           PIPE_BLENDFACTOR_xxx
 * @param factor_dst           PIPE_BLENDFACTOR_xxx
 * @param src                  source rgba
 * @param dst                  dest rgba
 * @param src_factor           src factor computed value
 * @param dst_factor           dst factor computed value
 * @param not_alpha_dependent  same factors across all channels of src/dst
 *
 * not_alpha_dependent should be:
 *   SoA: always true, as only one channel is handled at a time
 *   AoS: rgb_src_factor == alpha_src_factor && rgb_dst_factor == alpha_dst_factor
 *
 * Note that pretty much every possible optimisation can only be done on
 * non-unorm targets: unorm values cannot exceed 1.0, so factorisation can
 * change results, e.g. (0.9 * 0.9) + (0.9 * 0.9) != 0.9 * (0.9 + 0.9)
 * because the result of + is always <= 1.
 */
LLVMValueRef
lp_build_blend(struct lp_build_context *bld,
               unsigned func,
               unsigned factor_src,
               unsigned factor_dst,
               LLVMValueRef src,
               LLVMValueRef dst,
               LLVMValueRef src_factor,
               LLVMValueRef dst_factor,
               boolean not_alpha_dependent,
               boolean optimise_only)
{
   LLVMValueRef result, src_term, dst_term;

   /* If we are not alpha dependent we can mess with the src/dst factors */
   if (not_alpha_dependent) {
      if (lp_build_blend_factor_complementary(factor_src, factor_dst)) {
         if (func == PIPE_BLEND_ADD) {
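            /*
             * With complementary factors, src * f + dst * (1 - f) is a
             * linear interpolation between dst and src.  The numerically
             * smaller factor enum is the non-inverted one, so it supplies
             * the lerp weight.
             */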
            if (factor_src < factor_dst) {
               return lp_build_lerp(bld, src_factor, dst, src, 0);
            } else {
               return lp_build_lerp(bld, dst_factor, src, dst, 0);
            }
         } else if (bld->type.floating && func == PIPE_BLEND_SUBTRACT) {
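            /*
             * src * f - dst * (1 - f) factors into (src + dst) * f - dst,
             * and src * (1 - g) - dst * g into src - (src + dst) * g.
             * Only safe in floating point, where the intermediate sum
             * cannot saturate.
             */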
            result = lp_build_add(bld, src, dst);

            if (factor_src < factor_dst) {
               result = lp_build_mul(bld, result, src_factor);
               return lp_build_sub(bld, result, dst);
            } else {
               result = lp_build_mul(bld, result, dst_factor);
               return lp_build_sub(bld, src, result);
            }
         } else if (bld->type.floating && func == PIPE_BLEND_REVERSE_SUBTRACT) {
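            /*
             * Mirror image of the SUBTRACT case above:
             * dst * (1 - f) - src * f becomes dst - (src + dst) * f, and
             * dst * g - src * (1 - g) becomes (src + dst) * g - src.
             */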
            result = lp_build_add(bld, src, dst);

            if (factor_src < factor_dst) {
               result = lp_build_mul(bld, result, src_factor);
               return lp_build_sub(bld, dst, result);
            } else {
               result = lp_build_mul(bld, result, dst_factor);
               return lp_build_sub(bld, result, src);
            }
         }
      }

      if (bld->type.floating && factor_src == factor_dst) {
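         /*
          * Equal factors distribute over add/subtract in floating point:
          * src * f OP dst * f == (src OP dst) * f.
          */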
         if (func == PIPE_BLEND_ADD ||
             func == PIPE_BLEND_SUBTRACT ||
             func == PIPE_BLEND_REVERSE_SUBTRACT) {
            LLVMValueRef result;
            result = lp_build_blend_func(bld, func, src, dst);
            return lp_build_mul(bld, result, src_factor);
         }
      }
   }
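
   /*
    * No shortcut was applicable.  When only optimising, return NULL so the
    * caller knows it still has to emit the full blend equation.
    */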
   if (optimise_only)
      return NULL;

   src_term = lp_build_mul(bld, src, src_factor);
   dst_term = lp_build_mul(bld, dst, dst_factor);
   return lp_build_blend_func(bld, func, src_term, dst_term);
}

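/**
 * Apply a simple, all-or-nothing approximation of alpha-to-coverage to the
 * fragment mask: fragments whose alpha is not greater than 0.5 are masked
 * out.  If do_branch is set, a mask check is also emitted so the remaining
 * fragment processing can be skipped once no fragments survive.
 */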
void
lp_build_alpha_to_coverage(struct gallivm_state *gallivm,
                           struct lp_type type,
                           struct lp_build_mask_context *mask,
                           LLVMValueRef alpha,
                           boolean do_branch)
{
   struct lp_build_context bld;
   LLVMValueRef test;
   LLVMValueRef alpha_ref_value;

   lp_build_context_init(&bld, gallivm, type);

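   /*
    * Treat coverage as a single on/off decision: compare the fragment's
    * alpha against a fixed 0.5 threshold rather than converting it into a
    * per-sample coverage fraction.
    */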
   alpha_ref_value = lp_build_const_vec(gallivm, type, 0.5);

   test = lp_build_cmp(&bld, PIPE_FUNC_GREATER, alpha, alpha_ref_value);

   lp_build_name(test, "alpha_to_coverage");

   lp_build_mask_update(mask, test);

   if (do_branch)
      lp_build_mask_check(mask);
}