/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Texture sampling -- common code.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "lp_bld_arit.h"
#include "lp_bld_const.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "lp_bld_flow.h"
#include "lp_bld_sample.h"
#include "lp_bld_swizzle.h"
#include "lp_bld_type.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_quad.h"
#include "lp_bld_bitarit.h"


/*
 * Bri-linear factor. Should be greater than one.
 */
#define BRILINEAR_FACTOR 2
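
/*
 * A note on the factor (an observation, not from the original comments):
 * with factor == 1 the brilinear mapping in lp_build_brilinear_lod() below
 * degenerates to plain ifloor/fract, i.e. ordinary trilinear filtering.
 * Larger factors widen the regions that round to the nearest mip level, so
 * fewer pixels take the more expensive two-level blend path.
 */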

/**
 * Does the given texture wrap mode allow sampling the texture border color?
 * XXX maybe move this into gallium util code.
 */
boolean
lp_sampler_wrap_mode_uses_border_color(unsigned mode,
                                       unsigned min_img_filter,
                                       unsigned mag_img_filter)
{
   switch (mode) {
   case PIPE_TEX_WRAP_REPEAT:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      return FALSE;
   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
          mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
         return FALSE;
      } else {
         return TRUE;
      }
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      return TRUE;
   default:
      assert(0 && "unexpected wrap mode");
      return FALSE;
   }
}
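
/*
 * Illustrative example: PIPE_TEX_WRAP_CLAMP with a LINEAR filter can blend
 * border texels in for coords near the edge, hence TRUE above, while the
 * same wrap mode with NEAREST filters never fetches the border, hence FALSE.
 */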


/**
 * Initialize lp_static_texture_state object with the gallium
 * texture/sampler_view state (this contains the parts which are
 * considered static).
 */
void
lp_sampler_static_texture_state(struct lp_static_texture_state *state,
                                const struct pipe_sampler_view *view)
{
   const struct pipe_resource *texture;

   memset(state, 0, sizeof *state);

   if (!view || !view->texture)
      return;

   texture = view->texture;

   state->format = view->format;
   state->swizzle_r = view->swizzle_r;
   state->swizzle_g = view->swizzle_g;
   state->swizzle_b = view->swizzle_b;
   state->swizzle_a = view->swizzle_a;

   state->target = view->target;
   state->pot_width = util_is_power_of_two_or_zero(texture->width0);
   state->pot_height = util_is_power_of_two_or_zero(texture->height0);
   state->pot_depth = util_is_power_of_two_or_zero(texture->depth0);
   state->level_zero_only = !view->u.tex.last_level;

   /*
    * the layer / element / level parameters are all either dynamic
    * state or handled transparently wrt execution.
    */
}

/**
 * Initialize lp_static_texture_state object with the gallium image
 * view state (this contains the parts which are considered static).
 */
void
lp_sampler_static_texture_state_image(struct lp_static_texture_state *state,
                                      const struct pipe_image_view *view)
{
   const struct pipe_resource *resource;

   memset(state, 0, sizeof *state);

   if (!view || !view->resource)
      return;

   resource = view->resource;

   state->format = view->format;
   state->swizzle_r = PIPE_SWIZZLE_X;
   state->swizzle_g = PIPE_SWIZZLE_Y;
   state->swizzle_b = PIPE_SWIZZLE_Z;
   state->swizzle_a = PIPE_SWIZZLE_W;

   state->target = view->resource->target;
   state->pot_width = util_is_power_of_two_or_zero(resource->width0);
   state->pot_height = util_is_power_of_two_or_zero(resource->height0);
   state->pot_depth = util_is_power_of_two_or_zero(resource->depth0);
   state->level_zero_only = 0;

   /*
    * the layer / element / level parameters are all either dynamic
    * state or handled transparently wrt execution.
    */
}

/**
 * Initialize lp_static_sampler_state object with the gallium sampler
 * state (this contains the parts which are considered static).
 */
void
lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
                                const struct pipe_sampler_state *sampler)
{
   memset(state, 0, sizeof *state);

   if (!sampler)
      return;

   /*
    * We don't copy sampler state over unless it is actually enabled, to avoid
    * spurious recompiles, as the sampler static state is part of the shader
    * key.
    *
    * Ideally gallium frontends or the cso_cache module would make all state
    * canonical, but until that happens it's better to be safe than sorry here.
    *
    * XXX: Actually there's much more that can be done here, especially
    * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
    */

   state->wrap_s = sampler->wrap_s;
   state->wrap_t = sampler->wrap_t;
   state->wrap_r = sampler->wrap_r;
   state->min_img_filter = sampler->min_img_filter;
   state->mag_img_filter = sampler->mag_img_filter;
   state->min_mip_filter = sampler->min_mip_filter;
   state->seamless_cube_map = sampler->seamless_cube_map;

   if (sampler->max_lod > 0.0f) {
      state->max_lod_pos = 1;
   }

   if (sampler->lod_bias != 0.0f) {
      state->lod_bias_non_zero = 1;
   }

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE ||
       state->min_img_filter != state->mag_img_filter) {

      /* If min_lod == max_lod we can greatly simplify mipmap selection.
       * This is a case that occurs during automatic mipmap generation.
       */
      if (sampler->min_lod == sampler->max_lod) {
         state->min_max_lod_equal = 1;
      } else {
         if (sampler->min_lod > 0.0f) {
            state->apply_min_lod = 1;
         }

         /*
          * XXX this won't do anything with the mesa state tracker, which
          * always sets max_lod to no more than the number of mip levels
          * actually present...
          */
         if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
            state->apply_max_lod = 1;
         }
      }
   }

   state->compare_mode = sampler->compare_mode;
   if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
      state->compare_func = sampler->compare_func;
   }

   state->normalized_coords = sampler->normalized_coords;
}
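
/*
 * Rough usage sketch (hypothetical caller; 'my_shader_key' is made up, but
 * this is the usual pattern of baking static state into a shader key):
 *
 *    struct my_shader_key key;
 *    memset(&key, 0, sizeof key);
 *    lp_sampler_static_texture_state(&key.tex, view);
 *    lp_sampler_static_sampler_state(&key.sampler, sampler);
 *    // hash/compare 'key' to find or compile a matching shader variant
 *
 * This is why only state that actually affects codegen is copied above.
 */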


/**
 * Generate code to compute coordinate gradient (rho).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 *
 * The resulting rho has bld->levelf format (per quad or per element).
 */
static LLVMValueRef
lp_build_rho(struct lp_build_sample_context *bld,
             unsigned texture_unit,
             LLVMValueRef s,
             LLVMValueRef t,
             LLVMValueRef r,
             LLVMValueRef cube_rho,
             const struct lp_derivatives *derivs)
{
   struct gallivm_state *gallivm = bld->gallivm;
   struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
   struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
   struct lp_build_context *float_bld = &bld->float_bld;
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *rho_bld = &bld->lodf_bld;
   const unsigned dims = bld->dims;
   LLVMValueRef ddx_ddy[2] = {NULL};
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
   LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
   LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
   LLVMValueRef rho_vec;
   LLVMValueRef int_size, float_size;
   LLVMValueRef rho;
   LLVMValueRef first_level, first_level_vec;
   unsigned length = coord_bld->type.length;
   unsigned num_quads = length / 4;
   boolean rho_per_quad = rho_bld->type.length != length;
   boolean no_rho_opt = bld->no_rho_approx && (dims > 1);
   unsigned i;
   LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
   LLVMValueRef rho_xvec, rho_yvec;

   /* Note that all simplified calculations will only work for isotropic filtering */

   /*
    * rho calcs are always per quad except for explicit derivs (excluding
    * the messy cube maps for now) when requested.
    */

   first_level = bld->dynamic_state->first_level(bld->dynamic_state, bld->gallivm,
                                                 bld->context_ptr, texture_unit, NULL);
   first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
   int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec, TRUE);
   float_size = lp_build_int_to_float(float_size_bld, int_size);

   if (cube_rho) {
      LLVMValueRef cubesize;
      LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);

      /*
       * The cube map code already did everything except the size mul and
       * the per-quad extraction. Luckily cube maps are always square!
       */
      if (rho_per_quad) {
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, cube_rho, 0);
      }
      else {
         rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4);
      }
      /* Could optimize this for the single quad case by just skipping the broadcast. */
      cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                            rho_bld->type, float_size, index0);
      /* skipping sqrt hence returning rho squared */
      cubesize = lp_build_mul(rho_bld, cubesize, cubesize);
      rho = lp_build_mul(rho_bld, cubesize, rho);
   }
   else if (derivs) {
      LLVMValueRef ddmax[3] = { NULL }, ddx[3] = { NULL }, ddy[3] = { NULL };
      for (i = 0; i < dims; i++) {
         LLVMValueRef floatdim;
         LLVMValueRef indexi = lp_build_const_int32(gallivm, i);

         floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                               coord_bld->type, float_size, indexi);

         /*
          * note that for the rho_per_quad case we could reduce the math (at
          * some shuffle cost), but for now use the same code as for the
          * per-pixel lod case.
          */
         if (no_rho_opt) {
            ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
            ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
         }
         else {
            LLVMValueRef tmpx, tmpy;
            tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
            tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
            ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
            ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
         }
      }
      if (no_rho_opt) {
         rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
         rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
         if (dims > 2) {
            rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
            rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
         }
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
         /* skipping sqrt hence returning rho squared */
      }
      else {
         rho = ddmax[0];
         if (dims > 1) {
            rho = lp_build_max(coord_bld, rho, ddmax[1]);
            if (dims > 2) {
               rho = lp_build_max(coord_bld, rho, ddmax[2]);
            }
         }
      }
      if (rho_per_quad) {
         /*
          * rho_vec contains per-pixel rho, convert to scalar per quad.
          */
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, rho, 0);
      }
   }
   else {
      /*
       * This all looks a bit complex, but it's not that bad
       * (the shuffle code makes it look worse than it is).
       * Still, might not be ideal for all cases.
       */
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle2[] = {
         2, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };

      if (dims < 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
      }
      else if (dims >= 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
         }
      }

      if (no_rho_opt) {
         static const unsigned char swizzle01[] = { /* no-op swizzle */
            0, 1,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle23[] = {
            2, 3,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];

         for (i = 0; i < num_quads; i++) {
            shuffles[i*4+0] = shuffles[i*4+1] = index0;
            shuffles[i*4+2] = shuffles[i*4+3] = index1;
         }
         floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
                                           LLVMConstVector(shuffles, length), "");
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
         ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
         ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
         rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);

         if (dims > 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                                  coord_bld->type, float_size, index2);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
            ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
            rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
         }

         rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
         rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (rho_per_quad) {
            rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                            rho_bld->type, rho, 0);
         }
         else {
            rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
         }
         /* skipping sqrt hence returning rho squared */
      }
      else {
         ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
         }
         else {
            ddx_ddy[1] = NULL; /* silence compiler warning */
         }

         if (dims < 2) {
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2);
         }
         else if (dims == 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            static const unsigned char swizzle13[] = {
               1, 3,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
         }
         else {
            LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
            LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
            assert(dims == 3);
            for (i = 0; i < num_quads; i++) {
               shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
               shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
               shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
               shuffles1[4*i + 3] = i32undef;
               shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
               shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
               shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
               shuffles2[4*i + 3] = i32undef;
            }
            rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles1, length), "");
            rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles2, length), "");
         }

         rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (bld->coord_type.length > 4) {
            /* expand size to each quad */
            if (dims > 1) {
               /* could use some broadcast_vector helper for this? */
               LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
               for (i = 0; i < num_quads; i++) {
                  src[i] = float_size;
               }
               float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
            }
            else {
               float_size = lp_build_broadcast_scalar(coord_bld, float_size);
            }
            rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
                  rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);

                  rho = lp_build_max(coord_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
                     rho = lp_build_max(coord_bld, rho, rho_r);
                  }
               }
            }
            if (rho_per_quad) {
               rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                               rho_bld->type, rho, 0);
            }
            else {
               rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
            }
         }
         else {
            if (dims <= 1) {
               rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
            }
            rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
                  rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");

                  rho = lp_build_max(float_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
                     rho = lp_build_max(float_bld, rho, rho_r);
                  }
               }
            }
            if (!rho_per_quad) {
               rho = lp_build_broadcast_scalar(rho_bld, rho);
            }
         }
      }
   }

   return rho;
}


/*
 * Bri-linear lod computation
 *
 * Use a piece-wise linear approximation of log2 such that:
 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
 *   with the steepness specified in 'factor'
 * - exact result for 0.5, 1.5, etc.
 *
 *
 *   1.0 -              /----*
 *                     /
 *                    /
 *                   /
 *   0.5 -          *
 *                 /
 *                /
 *               /
 *   0.0 - *----/
 *
 *         |                 |
 *        2^0               2^1
 *
 * This is a technique also commonly used in hardware:
 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
 *
 * TODO: For correctness, this should only be applied when the texture is
 * known to have regular mipmaps, i.e., mipmaps derived from the base level.
 *
 * TODO: This could be done in fixed point, where applicable.
 */
static void
lp_build_brilinear_lod(struct lp_build_context *bld,
                       LLVMValueRef lod,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_fpart;
   double pre_offset = (factor - 0.5)/factor - 0.5;
   double post_offset = 1 - factor;

   if (0) {
      lp_build_printf(bld->gallivm, "lod = %f\n", lod);
   }

   lod = lp_build_add(bld, lod,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_offset));

   lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);

   lod_fpart = lp_build_mad(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor),
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * It's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_fpart = lod_fpart;

   if (0) {
      lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
      lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
   }
}
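
/*
 * Worked example as a sanity check (assuming BRILINEAR_FACTOR == 2, so
 * pre_offset = (2 - 0.5)/2 - 0.5 = 0.25 and post_offset = 1 - 2 = -1):
 *   lod = 1.5: 1.5 + 0.25 = 1.75 -> ipart = 1, fract = 0.75,
 *              fpart = 0.75*2 - 1 = 0.5 (exact, as promised above)
 *   lod = 1.1: 1.1 + 0.25 = 1.35 -> ipart = 1, fract = 0.35,
 *              fpart = 0.35*2 - 1 = -0.3 (negative, so no mip blend:
 *              effectively rounds to level 1)
 */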


/*
 * Combined log2 and brilinear lod computation.
 *
 * It's identical to calling lp_build_fast_log2() and lp_build_brilinear_lod()
 * above, but by combining the two we can compute the integer and fractional
 * parts independently.
 */
static void
lp_build_brilinear_rho(struct lp_build_context *bld,
                       LLVMValueRef rho,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_ipart;
   LLVMValueRef lod_fpart;

   const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
   const double post_offset = 1 - 2*factor;

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, rho));

   /*
    * The pre factor will make the intersections with the exact powers of two
    * happen precisely where we want them to be, which means that the integer
    * part will not need any post adjustments.
    */
   rho = lp_build_mul(bld, rho,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_factor));

   /* ipart = ifloor(log2(rho)) */
   lod_ipart = lp_build_extract_exponent(bld, rho, 0);

   /* fpart = rho / 2**ipart */
   lod_fpart = lp_build_extract_mantissa(bld, rho);

   lod_fpart = lp_build_mad(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor),
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_ipart = lod_ipart;
   *out_lod_fpart = lod_fpart;
}


/**
 * Fast implementation of iround(log2(sqrt(x))), based on
 * log2(x^n) == n*log2(x).
 *
 * Gives accurate results all the time.
 * (Could be trivially extended to handle other power-of-two roots.)
 */
static LLVMValueRef
lp_build_ilog2_sqrt(struct lp_build_context *bld,
                    LLVMValueRef x)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef ipart;
   struct lp_type i_type = lp_int_type(bld->type);
   LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1);

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, x));

   /* ipart = log2(x) + 0.5 = 0.5*(log2(x^2) + 1.0) */
   ipart = lp_build_extract_exponent(bld, x, 1);
   ipart = LLVMBuildAShr(builder, ipart, one, "");
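
   /*
    * E.g. for x = 40 (log2(40) ~= 5.32, so log2(sqrt(40)) ~= 2.66):
    * extract_exponent(x, 1) yields floor(log2(40)) + 1 = 6, and 6 >> 1 = 3
    * = iround(2.66). The +1 bias plus the arithmetic shift implement the
    * round-to-nearest of the halved exponent.
    */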

   return ipart;
}


/**
 * Generate code to compute texture level of detail (lambda).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 * \param lod_bias  optional float vector with the shader lod bias
 * \param explicit_lod  optional float vector with the explicit lod
 * \param cube_rho  rho calculated by cube coord mapping (optional)
 * \param out_lod_ipart  integer part of lod
 * \param out_lod_fpart  float part of lod (never larger than 1 but may be negative)
 * \param out_lod_positive  (mask) if lod is positive (i.e. texture is minified)
 *
 * The resulting lod can be scalar per quad or be per element.
 */
void
lp_build_lod_selector(struct lp_build_sample_context *bld,
                      boolean is_lodq,
                      unsigned texture_unit,
                      unsigned sampler_unit,
                      LLVMValueRef s,
                      LLVMValueRef t,
                      LLVMValueRef r,
                      LLVMValueRef cube_rho,
                      const struct lp_derivatives *derivs,
                      LLVMValueRef lod_bias, /* optional */
                      LLVMValueRef explicit_lod, /* optional */
                      unsigned mip_filter,
                      LLVMValueRef *out_lod,
                      LLVMValueRef *out_lod_ipart,
                      LLVMValueRef *out_lod_fpart,
                      LLVMValueRef *out_lod_positive)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
   struct lp_build_context *lodf_bld = &bld->lodf_bld;
   LLVMValueRef lod;

   *out_lod_ipart = bld->lodi_bld.zero;
   *out_lod_positive = bld->lodi_bld.zero;
   *out_lod_fpart = lodf_bld->zero;

   /*
    * For determining min/mag, we follow the GL 4.1 spec, 3.9.12 Texture Magnification:
    * "Implementations may either unconditionally assume c = 0 for the minification
    * vs. magnification switch-over point, or may choose to make c depend on the
    * combination of minification and magnification modes as follows: if the
    * magnification filter is given by LINEAR and the minification filter is given
    * by NEAREST_MIPMAP_NEAREST or NEAREST_MIPMAP_LINEAR, then c = 0.5. This is
    * done to ensure that a minified texture does not appear "sharper" than a
    * magnified texture. Otherwise c = 0."
    * And 3.9.11 Texture Minification:
    * "If lod is less than or equal to the constant c (see section 3.9.12) the
    * texture is said to be magnified; if it is greater, the texture is minified."
    * So, using 0 as the switchover point always, and using magnification for
    * lod == 0. Note that the always c = 0 behavior is new (first appearing in the
    * GL 3.1 spec), old GL versions required 0.5 for the modes listed above.
    * I have no clue about the (undocumented) wishes of d3d9/d3d10 here!
    */

   if (bld->static_sampler_state->min_max_lod_equal && !is_lodq) {
      /* User is forcing sampling from a particular mipmap level.
       * This is hit during mipmap generation.
       */
      LLVMValueRef min_lod =
         dynamic_state->min_lod(dynamic_state, bld->gallivm,
                                bld->context_ptr, sampler_unit);

      lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
   }
   else {
      if (explicit_lod) {
         if (bld->num_lods != bld->coord_type.length)
            lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                            lodf_bld->type, explicit_lod, 0);
         else
            lod = explicit_lod;
      }
      else {
         LLVMValueRef rho;
         boolean rho_squared = (bld->no_rho_approx &&
                                (bld->dims > 1)) || cube_rho;

         rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);

         /*
          * Compute lod = log2(rho)
          */

         if (!lod_bias && !is_lodq &&
             !bld->static_sampler_state->lod_bias_non_zero &&
             !bld->static_sampler_state->apply_max_lod &&
             !bld->static_sampler_state->apply_min_lod) {
            /*
             * Special case when there are no post-log2 adjustments, which
             * saves instructions by keeping the integer and fractional lod
             * computations separate from the start.
             */

            if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
                mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
               /*
                * Don't actually need both values all the time, lod_ipart is
                * needed for nearest mipfilter, lod_positive if min != mag.
                */
               if (rho_squared) {
                  *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho);
               }
               else {
                  *out_lod_ipart = lp_build_ilog2(lodf_bld, rho);
               }
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
                !bld->no_brilinear && !rho_squared) {
               /*
                * This can't work if rho is squared. Not sure if it could be
                * fixed while keeping it worthwhile; we could also do a sqrt
                * here, but brilinear and no_rho_opt seem like a combination
                * that doesn't make much sense anyway, so just use the
                * ordinary path below.
                */
               lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR,
                                      out_lod_ipart, out_lod_fpart);
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
         }

         if (0) {
            lod = lp_build_log2(lodf_bld, rho);
         }
         else {
            /* we get more accurate results if we just always square rho */
            if (!rho_squared)
               rho = lp_build_mul(lodf_bld, rho, rho);
            lod = lp_build_fast_log2(lodf_bld, rho);
         }

         /* log2(x) == 0.5*log2(x^2) */
         lod = lp_build_mul(lodf_bld, lod,
                            lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F));

         /* add shader lod bias */
         if (lod_bias) {
            if (bld->num_lods != bld->coord_type.length)
               lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                                    lodf_bld->type, lod_bias, 0);
            lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
         }
      }

      /* add sampler lod bias */
      if (bld->static_sampler_state->lod_bias_non_zero) {
         LLVMValueRef sampler_lod_bias =
            dynamic_state->lod_bias(dynamic_state, bld->gallivm,
                                    bld->context_ptr, sampler_unit);
         sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld,
                                                      sampler_lod_bias);
         lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
      }

      if (is_lodq) {
         *out_lod = lod;
      }

      /* clamp lod */
      if (bld->static_sampler_state->apply_max_lod) {
         LLVMValueRef max_lod =
            dynamic_state->max_lod(dynamic_state, bld->gallivm,
                                   bld->context_ptr, sampler_unit);
         max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod);

         lod = lp_build_min(lodf_bld, lod, max_lod);
      }
      if (bld->static_sampler_state->apply_min_lod) {
         LLVMValueRef min_lod =
            dynamic_state->min_lod(dynamic_state, bld->gallivm,
                                   bld->context_ptr, sampler_unit);
         min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod);

         lod = lp_build_max(lodf_bld, lod, min_lod);
      }

      if (is_lodq) {
         *out_lod_fpart = lod;
         return;
      }
   }

   *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                    lod, lodf_bld->zero);

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      if (!bld->no_brilinear) {
         lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR,
                                out_lod_ipart, out_lod_fpart);
      }
      else {
         lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart);
      }

      lp_build_name(*out_lod_fpart, "lod_fpart");
   }
   else {
      *out_lod_ipart = lp_build_iround(lodf_bld, lod);
   }

   lp_build_name(*out_lod_ipart, "lod_ipart");

   return;
}


/**
 * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod
 * to actual mip level.
 * Note: this is all scalar per quad code.
 * \param lod_ipart  int texture level of detail
 * \param level_out  returns integer mipmap level
 * \param out_of_bounds  returns per coord out_of_bounds mask if provided
 */
void
lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *level_out,
                           LLVMValueRef *out_of_bounds)
{
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
   LLVMValueRef first_level, last_level, level;

   first_level = dynamic_state->first_level(dynamic_state, bld->gallivm,
                                            bld->context_ptr, texture_unit, NULL);
   last_level = dynamic_state->last_level(dynamic_state, bld->gallivm,
                                          bld->context_ptr, texture_unit, NULL);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   level = lp_build_add(leveli_bld, lod_ipart, first_level);

   if (out_of_bounds) {
      LLVMValueRef out, out1;
      out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
      out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
      out = lp_build_or(leveli_bld, out, out1);
      if (bld->num_mips == bld->coord_bld.type.length) {
         *out_of_bounds = out;
      }
      else if (bld->num_mips == 1) {
         *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
      }
      else {
         assert(bld->num_mips == bld->coord_bld.type.length / 4);
         *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                                leveli_bld->type,
                                                                bld->int_coord_bld.type,
                                                                out);
      }
      level = lp_build_andnot(&bld->int_coord_bld, level, *out_of_bounds);
      *level_out = level;
   }
   else {
      /* clamp level to legal range of levels */
      *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
   }
}


/**
 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s)
 * to two (per-quad) (adjacent) mipmap level indexes, and fix up float lod
 * part accordingly.
 * Later, we'll sample from those two mipmap levels and interpolate between them.
 */
void
lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *lod_fpart_inout,
                           LLVMValueRef *level0_out,
                           LLVMValueRef *level1_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   struct lp_build_context *levelf_bld = &bld->levelf_bld;
   LLVMValueRef first_level, last_level;
   LLVMValueRef clamp_min;
   LLVMValueRef clamp_max;

   assert(bld->num_lods == bld->num_mips);

   first_level = dynamic_state->first_level(dynamic_state, bld->gallivm,
                                            bld->context_ptr, texture_unit, NULL);
   last_level = dynamic_state->last_level(dynamic_state, bld->gallivm,
                                          bld->context_ptr, texture_unit, NULL);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
   *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);

   /*
    * Clamp both *level0_out and *level1_out to [first_level, last_level], with
    * the minimum number of comparisons, and zeroing lod_fpart in the extreme
    * ends in the process.
    */

   /* *level0_out < first_level */
   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
                             *level0_out, first_level,
                             "clamp_lod_to_first");

   *level0_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   /* *level0_out >= last_level */
   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
                             *level0_out, last_level,
                             "clamp_lod_to_last");

   *level0_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
   lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
   lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
}
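
/*
 * Clamp example with illustrative numbers: first_level = 0, last_level = 2,
 * lod_ipart = 3. level0 starts as 3, which trips the clamp_max select, so
 * level0 = level1 = 2 and lod_fpart = 0 -- sampling degenerates to nearest
 * filtering at the coarsest level, as intended.
 */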


/**
 * Return pointer to a single mipmap level.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
                          LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], data_ptr, mip_offset;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   indexes[1] = level;
   mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
   mip_offset = LLVMBuildLoad(builder, mip_offset, "");
   data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
   return data_ptr;
}

/**
 * Return (per-pixel) offsets to mip levels.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
                         LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], offsets, offset1;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
      offset1 = LLVMBuildLoad(builder, offset1, "");
      offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      unsigned i;

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
      }
      offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
   }
   else {
      unsigned i;

      assert(bld->num_mips == bld->coord_bld.type.length);

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
      }
   }
   return offsets;
}


/**
 * Codegen equivalent for u_minify().
 * @param lod_scalar  if lod is a (broadcasted) scalar
 * Return max(1, base_size >> level);
 */
LLVMValueRef
lp_build_minify(struct lp_build_context *bld,
                LLVMValueRef base_size,
                LLVMValueRef level,
                boolean lod_scalar)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   assert(lp_check_value(bld->type, base_size));
   assert(lp_check_value(bld->type, level));

   if (level == bld->zero) {
      /* if we're using mipmap level zero, no minification is needed */
      return base_size;
   }
   else {
      LLVMValueRef size;
      assert(bld->type.sign);
      if (lod_scalar ||
          (util_cpu_caps.has_avx2 || !util_cpu_caps.has_sse)) {
         size = LLVMBuildLShr(builder, base_size, level, "minify");
         size = lp_build_max(bld, size, bld->one);
      }
      else {
         /*
          * emulate shift with float mul, since intel "forgot" shifts with
          * per-element shift count until avx2, which results in terrible
          * scalar extraction (both count and value), scalar shift,
          * vector reinsertion. Should not be an issue on any non-x86 cpu
          * with a vector instruction set.
          * On cpus with AMD's XOP this should also be unnecessary but I'm
          * not sure if llvm would emit this with current flags.
          */
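         /*
          * E.g. for level == 2: (127 - 2) << 23 bitcast to float is
          * 0x3E800000 = 0.25f = 2^-2, so a base_size of 100 becomes
          * 100.0 * 0.25 = 25.0 = 100 >> 2 before the max/trunc below.
          */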
         LLVMValueRef const127, const23, lf;
         struct lp_type ftype;
         struct lp_build_context fbld;
         ftype = lp_type_float_vec(32, bld->type.length * bld->type.width);
         lp_build_context_init(&fbld, bld->gallivm, ftype);
         const127 = lp_build_const_int_vec(bld->gallivm, bld->type, 127);
         const23 = lp_build_const_int_vec(bld->gallivm, bld->type, 23);

         /* calculate 2^(-level) float */
         lf = lp_build_sub(bld, const127, level);
         lf = lp_build_shl(bld, lf, const23);
         lf = LLVMBuildBitCast(builder, lf, fbld.vec_type, "");

         /* finish shift operation by doing float mul */
         base_size = lp_build_int_to_float(&fbld, base_size);
         size = lp_build_mul(&fbld, base_size, lf);
         /*
          * do the max also with floats because
          * a) non-emulated int max requires sse41
          *    (this is actually a lie as we could cast to 16bit values
          *    as 16bit is sufficient and 16bit int max is sse2)
          * b) with avx we can do int max 4-wide but float max 8-wide
          */
         size = lp_build_max(&fbld, size, fbld.one);
         size = lp_build_itrunc(&fbld, size);
      }
      return size;
   }
}


/**
 * Dereference stride_array[mipmap_level] array to get a stride.
 * Return stride as a vector.
 */
static LLVMValueRef
lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
                              LLVMValueRef stride_array, LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], stride, stride1;
   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
      stride1 = LLVMBuildLoad(builder, stride1, "");
      stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      LLVMValueRef stride1;
      unsigned i;

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
      }
      stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
   }
   else {
      LLVMValueRef stride1;
      unsigned i;

      assert(bld->num_mips == bld->coord_bld.type.length);

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->coord_bld.type.length; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
      }
   }
   return stride;
}


/**
 * When sampling a mipmap, we need to compute the width, height, depth
 * of the source levels from the level indexes.  This helper function
 * does that.
 */
void
lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
                            LLVMValueRef ilevel,
                            LLVMValueRef *out_size,
                            LLVMValueRef *row_stride_vec,
                            LLVMValueRef *img_stride_vec)
{
   const unsigned dims = bld->dims;
   LLVMValueRef ilevel_vec;

   /*
    * Compute width, height, depth at mipmap level 'ilevel'
    */
   if (bld->num_mips == 1) {
      ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
      *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec, TRUE);
   }
   else {
      LLVMValueRef int_size_vec;
      LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
      unsigned num_quads = bld->coord_bld.type.length / 4;
      unsigned i;

      if (bld->num_mips == num_quads) {
         /*
          * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
          * intel "forgot" the variable shift count instruction until avx2.
          * A harmless 8x32 shift gets translated into 32 instructions
          * (16 extracts, 8 scalar shifts, 8 inserts), llvm is apparently
          * unable to recognize if there are really just 2 different shift
          * count values. So do the shift 4-wide before expansion.
          */
         struct lp_build_context bld4;
         struct lp_type type4;

         type4 = bld->int_coord_bld.type;
         type4.length = 4;

         lp_build_context_init(&bld4, bld->gallivm, type4);

         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld4,
                                                     bld->int_size);
         }
         else {
            assert(bld->int_size_in_bld.type.length == 4);
            int_size_vec = bld->int_size;
         }

         for (i = 0; i < num_quads; i++) {
            LLVMValueRef ileveli;
            LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);

            ileveli = lp_build_extract_broadcast(bld->gallivm,
                                                 bld->leveli_bld.type,
                                                 bld4.type,
                                                 ilevel,
                                                 indexi);
            tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli, TRUE);
         }
         /*
          * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
          * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
          */
         *out_size = lp_build_concat(bld->gallivm,
                                     tmp,
                                     bld4.type,
                                     num_quads);
      }
      else {
         /* FIXME: this is terrible and results in _huge_ vectors
          * (for the dims > 1 case).
          * Should refactor this (together with extract_image_sizes) and do
          * something more useful. For instance, if we have width and height
          * in a 4-wide vector we could pack all elements into an 8xi16 vector
          * (on which we can still do useful math) instead of using a 16xi32
          * vector.
          * For dims == 1 this will create a [w0, w1, w2, w3, ...] vector.
          * For dims > 1 this will create a [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
          */
         assert(bld->num_mips == bld->coord_bld.type.length);
         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
                                                     bld->int_size);
            *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel, FALSE);
         }
         else {
            LLVMValueRef ilevel1;
            for (i = 0; i < bld->num_mips; i++) {
               LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
               ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
                                                    bld->int_size_in_bld.type, ilevel, indexi);
               tmp[i] = bld->int_size;
               tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1, TRUE);
            }
            *out_size = lp_build_concat(bld->gallivm, tmp,
                                        bld->int_size_in_bld.type,
                                        bld->num_mips);
         }
      }
   }

   if (dims >= 2) {
      *row_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->row_stride_array,
                                                      ilevel);
   }
   if (dims == 3 || has_layer_coord(bld->static_texture_state->target)) {
      *img_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->img_stride_array,
                                                      ilevel);
   }
}


/**
 * Extract and broadcast texture size.
 *
 * @param size_bld    build context for the texture size vector (its type is
 *                    either bld->int_size_type or bld->float_size_type)
 * @param coord_type  type of the coordinate vector (either
 *                    bld->int_coord_type or bld->coord_type)
 * @param size        vector with the texture size (width, height, depth)
 */
void
lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
                             struct lp_build_context *size_bld,
                             struct lp_type coord_type,
                             LLVMValueRef size,
                             LLVMValueRef *out_width,
                             LLVMValueRef *out_height,
                             LLVMValueRef *out_depth)
{
   const unsigned dims = bld->dims;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   struct lp_type size_type = size_bld->type;

   if (bld->num_mips == 1) {
      *out_width = lp_build_extract_broadcast(bld->gallivm,
                                              size_type,
                                              coord_type,
                                              size,
                                              LLVMConstInt(i32t, 0, 0));
      if (dims >= 2) {
         *out_height = lp_build_extract_broadcast(bld->gallivm,
                                                  size_type,
                                                  coord_type,
                                                  size,
                                                  LLVMConstInt(i32t, 1, 0));
         if (dims == 3) {
            *out_depth = lp_build_extract_broadcast(bld->gallivm,
                                                    size_type,
                                                    coord_type,
                                                    size,
                                                    LLVMConstInt(i32t, 2, 0));
         }
      }
   }
   else {
      unsigned num_quads = bld->coord_bld.type.length / 4;

      if (dims == 1) {
         *out_width = size;
      }
      else if (bld->num_mips == num_quads) {
         *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
         if (dims >= 2) {
            *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
            if (dims == 3) {
               *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
            }
         }
      }
      else {
         assert(bld->num_mips == bld->coord_type.length);
         *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                coord_type, size, 0);
         if (dims >= 2) {
            *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                    coord_type, size, 1);
            if (dims == 3) {
               *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                      coord_type, size, 2);
            }
         }
      }
   }
}
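
/*
 * E.g. (illustrative): for dims == 3 with num_mips == num_quads, 'size' is
 * laid out as [w0, h0, d0, _, w1, h1, d1, _, ...], so the swizzles above
 * produce out_width = [w0, w0, w0, w0, w1, w1, w1, w1, ...] etc., i.e. one
 * value replicated across each quad.
 */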


/**
 * Unnormalize coords.
 *
 * @param flt_size  vector with the texture size as floats (width, height, depth)
 */
void
lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
                             LLVMValueRef flt_size,
                             LLVMValueRef *s,
                             LLVMValueRef *t,
                             LLVMValueRef *r)
{
   const unsigned dims = bld->dims;
   LLVMValueRef width;
   LLVMValueRef height = NULL;
   LLVMValueRef depth = NULL;

   lp_build_extract_image_sizes(bld,
                                &bld->float_size_bld,
                                bld->coord_type,
                                flt_size,
                                &width,
                                &height,
                                &depth);

   /* s = s * width, t = t * height */
   *s = lp_build_mul(&bld->coord_bld, *s, width);
   if (dims >= 2) {
      *t = lp_build_mul(&bld->coord_bld, *t, height);
      if (dims >= 3) {
         *r = lp_build_mul(&bld->coord_bld, *r, depth);
      }
   }
}

/**
 * Generate new coords and faces for cubemap texels falling off the face.
 *
 * @param face  face (center) of the pixel
 * @param x0  lower x coord
 * @param x1  higher x coord (must be x0 + 1)
 * @param y0  lower y coord
 * @param y1  higher y coord (must be y0 + 1)
 * @param max_coord  texture cube (level) size - 1
 * @param next_faces  new face values when falling off
 * @param next_xcoords  new x coord values when falling off
 * @param next_ycoords  new y coord values when falling off
 *
 * The arrays hold the new values when under/overflow of
 * lower x, higher x, lower y, higher y coord would occur (in this order).
 * next_xcoords/next_ycoords have two entries each (for both new lower and
 * higher coord).
 */
void
lp_build_cube_new_coords(struct lp_build_context *ivec_bld,
                         LLVMValueRef face,
                         LLVMValueRef x0,
                         LLVMValueRef x1,
                         LLVMValueRef y0,
                         LLVMValueRef y1,
                         LLVMValueRef max_coord,
                         LLVMValueRef next_faces[4],
                         LLVMValueRef next_xcoords[4][2],
                         LLVMValueRef next_ycoords[4][2])
{
   /*
    * Lookup tables aren't nice for simd code hence try some logic here.
    * (Note that while it would not be necessary to do per-sample (4) lookups
    * when using a LUT as it's impossible that texels fall off of positive
    * and negative edges simultaneously, it would however be necessary to
    * do 2 lookups for corner handling as in this case texels both fall off
    * of x and y axes.)
    */
   /*
    * Next faces (for face 012345):
    * x < 0.0  : 451110
    * x >= 1.0 : 540001
    * y < 0.0  : 225422
    * y >= 1.0 : 334533
    * Hence nfx+ (and nfy+) == nfx- (nfy-) xor 1
    * nfx-: face > 1 ? (face == 5 ? 0 : 1) : (4 + (face & 1))
    * nfy+: face & ~4 > 1 ? face + 2 : 3;
    * This could also use pshufb instead, but would need (manually coded)
    * ssse3 intrinsic (llvm won't do non-constant shuffles).
    */
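   /*
    * Spot check of the nfx- formula against the table (illustrative): for
    * face 2 the "x < 0.0" row reads 451110, so the new face is 1; the
    * formula gives face > 1 and face != 5 -> 1. For face 0 it gives
    * 4 + (0 & 1) = 4, also matching the table.
    */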
1533 struct gallivm_state *gallivm = ivec_bld->gallivm;
1534 LLVMValueRef sel, sel_f2345, sel_f23, sel_f2, tmpsel, tmp;
1535 LLVMValueRef faceand1, sel_fand1, maxmx0, maxmx1, maxmy0, maxmy1;
1536 LLVMValueRef c2 = lp_build_const_int_vec(gallivm, ivec_bld->type, 2);
1537 LLVMValueRef c3 = lp_build_const_int_vec(gallivm, ivec_bld->type, 3);
1538 LLVMValueRef c4 = lp_build_const_int_vec(gallivm, ivec_bld->type, 4);
1539 LLVMValueRef c5 = lp_build_const_int_vec(gallivm, ivec_bld->type, 5);
1540
1541 sel = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c5);
1542 tmpsel = lp_build_select(ivec_bld, sel, ivec_bld->zero, ivec_bld->one);
1543 sel_f2345 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, face, ivec_bld->one);
1544 faceand1 = lp_build_and(ivec_bld, face, ivec_bld->one);
1545 tmp = lp_build_add(ivec_bld, faceand1, c4);
1546 next_faces[0] = lp_build_select(ivec_bld, sel_f2345, tmpsel, tmp);
1547 next_faces[1] = lp_build_xor(ivec_bld, next_faces[0], ivec_bld->one);
1548
1549 tmp = lp_build_andnot(ivec_bld, face, c4);
1550 sel_f23 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, tmp, ivec_bld->one);
1551 tmp = lp_build_add(ivec_bld, face, c2);
1552 next_faces[3] = lp_build_select(ivec_bld, sel_f23, tmp, c3);
1553 next_faces[2] = lp_build_xor(ivec_bld, next_faces[3], ivec_bld->one);
1554
1555 /*
1556 * new xcoords (for face 012345):
1557 * x < 0.0 : max max t max-t max max
1558 * x >= 1.0 : 0 0 max-t t 0 0
1559 * y < 0.0 : max 0 max-s s s max-s
1560 * y >= 1.0 : max 0 s max-s s max-s
1561 *
1562 * ncx[1] = face & ~4 > 1 ? (face == 2 ? max-t : t) : 0
1563 * ncx[0] = max - ncx[1]
1564 * ncx[3] = face > 1 ? (face & 1 ? max-s : s) : (face & 1) ? 0 : max
1565 * ncx[2] = face & ~4 > 1 ? max - ncx[3] : ncx[3]
1566 */
   sel_f2 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c2);
   maxmy0 = lp_build_sub(ivec_bld, max_coord, y0);
   tmp = lp_build_select(ivec_bld, sel_f2, maxmy0, y0);
   next_xcoords[1][0] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero);
   next_xcoords[0][0] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][0]);
   maxmy1 = lp_build_sub(ivec_bld, max_coord, y1);
   tmp = lp_build_select(ivec_bld, sel_f2, maxmy1, y1);
   next_xcoords[1][1] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero);
   next_xcoords[0][1] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][1]);

   sel_fand1 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, faceand1, ivec_bld->one);

   tmpsel = lp_build_select(ivec_bld, sel_fand1, ivec_bld->zero, max_coord);
   maxmx0 = lp_build_sub(ivec_bld, max_coord, x0);
   tmp = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0);
   next_xcoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][0]);
   next_xcoords[2][0] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][0]);
   maxmx1 = lp_build_sub(ivec_bld, max_coord, x1);
   tmp = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1);
   next_xcoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][1]);
   next_xcoords[2][1] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][1]);

   /*
    * New ycoords (for face 012345):
    * x < 0.0  : t      t      0      max    t      t
    * x >= 1.0 : t      t      0      max    t      t
    * y < 0.0  : max-s  s      0      max    max    0
    * y >= 1.0 : s      max-s  0      max    0      max
    *
    * ncy[0] = (face & ~4) > 1 ? (face == 2 ? 0 : max) : t
    * ncy[1] = ncy[0]
    * ncy[3] = face > 1 ? ((face & 1) ? max : 0) : ((face & 1) ? max-s : s)
    * ncy[2] = (face & ~4) > 1 ? ncy[3] : max - ncy[3]
    */
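#if 0
   /*
    * Illustrative scalar sketch of the new ycoords (never compiled;
    * same conventions as the sketches above):
    */
   unsigned ncy0 = (f & ~4) > 1 ? (f == 2 ? 0 : max) : t;       /* x < 0.0  */
   unsigned ncy1 = ncy0;                                        /* x >= 1.0 */
   unsigned ncy3 = f > 1 ? ((f & 1) ? max : 0)
                         : ((f & 1) ? max - s : s);             /* y >= 1.0 */
   unsigned ncy2 = (f & ~4) > 1 ? ncy3 : max - ncy3;            /* y < 0.0  */
#endif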
   tmp = lp_build_select(ivec_bld, sel_f2, ivec_bld->zero, max_coord);
   next_ycoords[0][0] = lp_build_select(ivec_bld, sel_f23, tmp, y0);
   next_ycoords[1][0] = next_ycoords[0][0];
   next_ycoords[0][1] = lp_build_select(ivec_bld, sel_f23, tmp, y1);
   next_ycoords[1][1] = next_ycoords[0][1];

   tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0);
   tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
   next_ycoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][0]);
   next_ycoords[2][0] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][0], tmp);
   tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1);
   tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
   next_ycoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][1]);
   next_ycoords[2][1] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][1], tmp);
}


/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = +0.5 / abs(coord); */
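   /*
    * The caller multiplies the (mirrored) minor-axis coords by this factor
    * and then adds 0.5, mapping [-|ma|, |ma|] onto [0, 1].
    */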
   LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
   return ima;
}


/** Helper for doing 3-wise selection.
 * Returns sel1 ? val2 : (sel0 ? val0 : val1).
 */
static LLVMValueRef
lp_build_select3(struct lp_build_context *sel_bld,
                 LLVMValueRef sel0,
                 LLVMValueRef sel1,
                 LLVMValueRef val0,
                 LLVMValueRef val1,
                 LLVMValueRef val2)
{
   LLVMValueRef tmp;
   tmp = lp_build_select(sel_bld, sel0, val0, val1);
   return lp_build_select(sel_bld, sel1, val2, tmp);
}


/**
 * Generate code to do cube face selection and compute per-face texcoords.
 */
void
lp_build_cube_lookup(struct lp_build_sample_context *bld,
                     LLVMValueRef *coords,
                     const struct lp_derivatives *derivs_in, /* optional */
                     LLVMValueRef *rho,
                     struct lp_derivatives *derivs_out, /* optional */
                     boolean need_derivs)
{
   struct lp_build_context *coord_bld = &bld->coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMValueRef si, ti, ri;

   /*
    * Do per-pixel face selection. We cannot, however, simply calculate
    * the derivs afterwards (as we used to do), which is very bogus for
    * explicit derivs anyway, because the values would be "random" when
    * not all pixels lie on the same face. So what we do here is calculate
    * the derivatives after scaling the coords by the absolute value of the
    * inverse major axis, and essentially do the rho calculation steps as if
    * it were a 3d texture. This is perfect if all pixels hit the same face,
    * but not so great at edges; I believe the max error should be sqrt(2)
    * with no_rho_approx or 2 otherwise (essentially measuring the 3d
    * distance between 2 points on the cube instead of measuring up/down
    * the edge). Still, this is possibly a win over just selecting the same
    * face for all pixels. Unfortunately, something like that doesn't work
    * for explicit derivatives.
    */
   struct lp_build_context *cint_bld = &bld->int_coord_bld;
   struct lp_type intctype = cint_bld->type;
   LLVMTypeRef coord_vec_type = coord_bld->vec_type;
   LLVMTypeRef cint_vec_type = cint_bld->vec_type;
   LLVMValueRef as, at, ar, face, face_s, face_t;
   LLVMValueRef as_ge_at, maxasat, ar_ge_as_at;
   LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
   LLVMValueRef tnegi, rnegi;
   LLVMValueRef ma, mai, signma, signmabit, imahalfpos;
   LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
   LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
                                                  1LL << (intctype.width - 1));
   LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
                                                   intctype.width - 1);
   LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
   LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
   LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
   LLVMValueRef s = coords[0];
   LLVMValueRef t = coords[1];
   LLVMValueRef r = coords[2];

   assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
   assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
   assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);

   /*
    * Get the absolute value (for x/y/z face selection) and the sign bit
    * (for mirroring minor coords and pos/neg face selection)
    * of the original coords.
    */
   as = lp_build_abs(&bld->coord_bld, s);
   at = lp_build_abs(&bld->coord_bld, t);
   ar = lp_build_abs(&bld->coord_bld, r);

   /*
    * Major face determination: select x if x > y, else select y;
    * select z if z >= max(x,y), else select the previous result.
    * If some axes are equal we choose z over y, y over x - the dx10
    * spec seems to ask for this, while OpenGL doesn't care (if we
    * didn't care we could save a select or two by using different
    * compares and doing at_g_as_ar last, since tnewx and tnewz are the
    * same). A scalar sketch of the full selection follows below.
    */
   as_ge_at = lp_build_cmp(coord_bld, PIPE_FUNC_GREATER, as, at);
   maxasat = lp_build_max(coord_bld, as, at);
   ar_ge_as_at = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, ar, maxasat);
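#if 0
   /*
    * Illustrative scalar sketch of the face selection (never compiled;
    * uses plain float coords and ignores the -0.0 corner case):
    */
   unsigned scalar_face;
   if (fabsf(r) >= fabsf(s) && fabsf(r) >= fabsf(t))
      scalar_face = r >= 0.0f ? PIPE_TEX_FACE_POS_Z : PIPE_TEX_FACE_NEG_Z;
   else if (fabsf(s) > fabsf(t))
      scalar_face = s >= 0.0f ? PIPE_TEX_FACE_POS_X : PIPE_TEX_FACE_NEG_X;
   else
      scalar_face = t >= 0.0f ? PIPE_TEX_FACE_POS_Y : PIPE_TEX_FACE_NEG_Y;
#endif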

   if (need_derivs && (derivs_in || (bld->no_quad_lod && bld->no_rho_approx))) {
      /*
       * XXX: This is really really complex.
       * It is a bit overkill to use this for implicit derivatives as well,
       * no way this is worth the cost in practice, but seems to be the
       * only way for getting accurate and per-pixel lod values.
       */
      LLVMValueRef ima, imahalf, tmp, ddx[3], ddy[3];
      LLVMValueRef madx, mady, madxdivma, madydivma;
      LLVMValueRef sdxi, tdxi, rdxi, sdyi, tdyi, rdyi;
      LLVMValueRef tdxnegi, rdxnegi, tdynegi, rdynegi;
      LLVMValueRef sdxnewx, sdxnewy, sdxnewz, tdxnewx, tdxnewy, tdxnewz;
      LLVMValueRef sdynewx, sdynewy, sdynewz, tdynewx, tdynewy, tdynewz;
      LLVMValueRef face_sdx, face_tdx, face_sdy, face_tdy;
      /*
       * s = 1/2 * ( sc / ma + 1)
       * t = 1/2 * ( tc / ma + 1)
       *
       * s' = 1/2 * (sc' * ma - sc * ma') / ma^2   (quotient rule)
       * t' = 1/2 * (tc' * ma - tc * ma') / ma^2
       *
       * dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma
       * dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma
       * dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma
       * dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma
       */

      /* select ma, calculate ima */
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
      mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
      signmabit = LLVMBuildAnd(builder, mai, signmask, "");
      ima = lp_build_div(coord_bld, coord_bld->one, ma);
      imahalf = lp_build_mul(coord_bld, posHalf, ima);
      imahalfpos = lp_build_abs(coord_bld, imahalf);

      if (!derivs_in) {
         ddx[0] = lp_build_ddx(coord_bld, s);
         ddx[1] = lp_build_ddx(coord_bld, t);
         ddx[2] = lp_build_ddx(coord_bld, r);
         ddy[0] = lp_build_ddy(coord_bld, s);
         ddy[1] = lp_build_ddy(coord_bld, t);
         ddy[2] = lp_build_ddy(coord_bld, r);
      }
      else {
         ddx[0] = derivs_in->ddx[0];
         ddx[1] = derivs_in->ddx[1];
         ddx[2] = derivs_in->ddx[2];
         ddy[0] = derivs_in->ddy[0];
         ddy[1] = derivs_in->ddy[1];
         ddy[2] = derivs_in->ddy[2];
      }

      /* select major derivatives */
      madx = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddx[0], ddx[1], ddx[2]);
      mady = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddy[0], ddy[1], ddy[2]);

      si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
      ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
      ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");

      sdxi = LLVMBuildBitCast(builder, ddx[0], cint_vec_type, "");
      tdxi = LLVMBuildBitCast(builder, ddx[1], cint_vec_type, "");
      rdxi = LLVMBuildBitCast(builder, ddx[2], cint_vec_type, "");

      sdyi = LLVMBuildBitCast(builder, ddy[0], cint_vec_type, "");
      tdyi = LLVMBuildBitCast(builder, ddy[1], cint_vec_type, "");
      rdyi = LLVMBuildBitCast(builder, ddy[2], cint_vec_type, "");

      /*
       * Compute all possible new s/t coords, which also does the mirroring,
       * and do the same for the minor axes of the derivs.
       * snewx = signma * -r;
       * tnewx = -t;
       * snewy = s;
       * tnewy = signma * r;
       * snewz = signma * s;
       * tnewz = -t;
       */
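      /*
       * (Sign tricks below: negating a float is an xor with the sign mask,
       * and multiplying by signma (+1/-1) amounts to xoring ma's sign bit
       * into the float's sign bit, so no actual float math is needed.)
       */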
      tnegi = LLVMBuildXor(builder, ti, signmask, "");
      rnegi = LLVMBuildXor(builder, ri, signmask, "");
      tdxnegi = LLVMBuildXor(builder, tdxi, signmask, "");
      rdxnegi = LLVMBuildXor(builder, rdxi, signmask, "");
      tdynegi = LLVMBuildXor(builder, tdyi, signmask, "");
      rdynegi = LLVMBuildXor(builder, rdyi, signmask, "");

      snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
      tnewx = tnegi;
      sdxnewx = LLVMBuildXor(builder, signmabit, rdxnegi, "");
      tdxnewx = tdxnegi;
      sdynewx = LLVMBuildXor(builder, signmabit, rdynegi, "");
      tdynewx = tdynegi;

      snewy = si;
      tnewy = LLVMBuildXor(builder, signmabit, ri, "");
      sdxnewy = sdxi;
      tdxnewy = LLVMBuildXor(builder, signmabit, rdxi, "");
      sdynewy = sdyi;
      tdynewy = LLVMBuildXor(builder, signmabit, rdyi, "");

      snewz = LLVMBuildXor(builder, signmabit, si, "");
      tnewz = tnegi;
      sdxnewz = LLVMBuildXor(builder, signmabit, sdxi, "");
      tdxnewz = tdxnegi;
      sdynewz = LLVMBuildXor(builder, signmabit, sdyi, "");
      tdynewz = tdynegi;

      /* select the mirrored values */
      face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
      face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
      face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
      face_sdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdxnewx, sdxnewy, sdxnewz);
      face_tdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdxnewx, tdxnewy, tdxnewz);
      face_sdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdynewx, sdynewy, sdynewz);
      face_tdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdynewx, tdynewy, tdynewz);

      face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
      face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
      face_sdx = LLVMBuildBitCast(builder, face_sdx, coord_vec_type, "");
      face_tdx = LLVMBuildBitCast(builder, face_tdx, coord_vec_type, "");
      face_sdy = LLVMBuildBitCast(builder, face_sdy, coord_vec_type, "");
      face_tdy = LLVMBuildBitCast(builder, face_tdy, coord_vec_type, "");

      /* deriv math, dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma */
      madxdivma = lp_build_mul(coord_bld, madx, ima);
      tmp = lp_build_mul(coord_bld, madxdivma, face_s);
      tmp = lp_build_sub(coord_bld, face_sdx, tmp);
      derivs_out->ddx[0] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma */
      tmp = lp_build_mul(coord_bld, madxdivma, face_t);
      tmp = lp_build_sub(coord_bld, face_tdx, tmp);
      derivs_out->ddx[1] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma */
      madydivma = lp_build_mul(coord_bld, mady, ima);
      tmp = lp_build_mul(coord_bld, madydivma, face_s);
      tmp = lp_build_sub(coord_bld, face_sdy, tmp);
      derivs_out->ddy[0] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma */
      tmp = lp_build_mul(coord_bld, madydivma, face_t);
      tmp = lp_build_sub(coord_bld, face_tdy, tmp);
      derivs_out->ddy[1] = lp_build_mul(coord_bld, tmp, imahalf);

      signma = LLVMBuildLShr(builder, mai, signshift, "");
      coords[2] = LLVMBuildOr(builder, face, signma, "face");

      /* project coords */
      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);

      coords[0] = lp_build_add(coord_bld, face_s, posHalf);
      coords[1] = lp_build_add(coord_bld, face_t, posHalf);

      return;
   }

   else if (need_derivs) {
      LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle01[] = { /* no-op swizzle */
         0, 1,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle23[] = {
         2, 3,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle02[] = {
         0, 2,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };

      /*
       * Scale the s/t/r coords pre-select/mirror so we can calculate
       * "reasonable" derivs.
       */
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
      s = lp_build_mul(coord_bld, s, imahalfpos);
      t = lp_build_mul(coord_bld, t, imahalfpos);
      r = lp_build_mul(coord_bld, r, imahalfpos);

      /*
       * This isn't quite the same as the "ordinary" (3d deriv) path since we
       * know the texture is square, which simplifies things (we can omit the
       * size mul, which normally happens very early, completely here and do
       * it at the very end).
       * Also, always do the calculations according to
       * GALLIVM_DEBUG_NO_RHO_APPROX since the error can get quite big
       * otherwise at edges.
       * (With no_rho_approx the max error is sqrt(2) at edges, same as it is
       * without no_rho_approx for 2d textures; otherwise it would be a
       * factor of 2.)
       */
      ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
      ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);

      ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
      ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);

      tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
      tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
      tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);

      rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
      rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);

      tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
      tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
      *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
   }

   if (!need_derivs) {
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
   }
   mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
   signmabit = LLVMBuildAnd(builder, mai, signmask, "");

   si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
   ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
   ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");

   /*
    * Compute all possible new s/t coords, which also does the mirroring:
    * snewx = signma * -r;
    * tnewx = -t;
    * snewy = s;
    * tnewy = signma * r;
    * snewz = signma * s;
    * tnewz = -t;
    */
   tnegi = LLVMBuildXor(builder, ti, signmask, "");
   rnegi = LLVMBuildXor(builder, ri, signmask, "");

   snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
   tnewx = tnegi;

   snewy = si;
   tnewy = LLVMBuildXor(builder, signmabit, ri, "");

   snewz = LLVMBuildXor(builder, signmabit, si, "");
   tnewz = tnegi;

   /* select the mirrored values */
   face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
   face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
   face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);

   face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
   face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");

   /* add +1 for neg face */
   /* XXX with AVX probably want to use another select here -
    * as long as we ensure vblendvps gets used we can actually
    * skip the comparison and just use sign as a "mask" directly.
    */
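   /*
    * signma is ma's sign bit shifted down to bit 0, i.e. 1 for a negative
    * major axis, and PIPE_TEX_FACE_NEG_* == PIPE_TEX_FACE_POS_* + 1
    * (asserted above), so or'ing it in selects the negative face.
    */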
   signma = LLVMBuildLShr(builder, mai, signshift, "");
   coords[2] = LLVMBuildOr(builder, face, signma, "face");

   /* project coords */
   if (!need_derivs) {
      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
   }

   coords[0] = lp_build_add(coord_bld, face_s, posHalf);
   coords[1] = lp_build_add(coord_bld, face_t, posHalf);
}


/**
 * Compute the partial offset of a pixel block along an arbitrary axis.
 *
 * @param coord coordinate in pixels
 * @param stride number of bytes between rows of successive pixel blocks
 * @param block_length number of pixels in a pixel block along the coordinate
 *                     axis
 * @param out_offset resulting relative offset of the pixel block in bytes
 * @param out_subcoord resulting sub-block pixel coordinate
 */
void
lp_build_sample_partial_offset(struct lp_build_context *bld,
                               unsigned block_length,
                               LLVMValueRef coord,
                               LLVMValueRef stride,
                               LLVMValueRef *out_offset,
                               LLVMValueRef *out_subcoord)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef offset;
   LLVMValueRef subcoord;

   if (block_length == 1) {
      subcoord = bld->zero;
   }
   else {
      /*
       * Pixel blocks have power of two dimensions, so LLVM should convert
       * the rem/div to bit arithmetic.
       * It does indeed, BUT it transforms it to scalar (and back) when
       * doing so (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
       * The generated code looks seriously unfunny and is quite expensive,
       * hence the explicit shift/mask variant below.
       */
#if 0
      LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length);
      subcoord = LLVMBuildURem(builder, coord, block_width, "");
      coord = LLVMBuildUDiv(builder, coord, block_width, "");
#else
      unsigned logbase2 = util_logbase2(block_length);
      LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
      LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
      subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
      coord = LLVMBuildLShr(builder, coord, block_shift, "");
#endif
   }
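
   /*
    * E.g. with block_length 4 and coord 13: subcoord = 13 & 3 = 1 and the
    * block index becomes 13 >> 2 = 3, so the offset below is 3 * stride.
    */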
   offset = lp_build_mul(bld, coord, stride);

   assert(out_offset);
   assert(out_subcoord);

   *out_offset = offset;
   *out_subcoord = subcoord;
}


/**
 * Compute the offset of a pixel block.
 *
 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
 *
 * Returns the relative offset and the i,j sub-block coordinates.
 */
void
lp_build_sample_offset(struct lp_build_context *bld,
                       const struct util_format_description *format_desc,
                       LLVMValueRef x,
                       LLVMValueRef y,
                       LLVMValueRef z,
                       LLVMValueRef y_stride,
                       LLVMValueRef z_stride,
                       LLVMValueRef *out_offset,
                       LLVMValueRef *out_i,
                       LLVMValueRef *out_j)
{
   LLVMValueRef x_stride;
   LLVMValueRef offset;

   x_stride = lp_build_const_vec(bld->gallivm, bld->type,
                                 format_desc->block.bits/8);
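
   /*
    * E.g. for a plain 32bpp format the blocks are 1x1 pixels and x_stride
    * is 4, so for a 2D texture the code below yields
    * offset = y * y_stride + x * 4, with i and j both zero.
    */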

   lp_build_sample_partial_offset(bld,
                                  format_desc->block.width,
                                  x, x_stride,
                                  &offset, out_i);

   if (y && y_stride) {
      LLVMValueRef y_offset;
      lp_build_sample_partial_offset(bld,
                                     format_desc->block.height,
                                     y, y_stride,
                                     &y_offset, out_j);
      offset = lp_build_add(bld, offset, y_offset);
   }
   else {
      *out_j = bld->zero;
   }

   if (z && z_stride) {
      LLVMValueRef z_offset;
      LLVMValueRef k;
      lp_build_sample_partial_offset(bld,
                                     1, /* pixel blocks are always 2D */
                                     z, z_stride,
                                     &z_offset, &k);
      offset = lp_build_add(bld, offset, z_offset);
   }

   *out_offset = offset;
}