1 /**************************************************************************
2 *
3 * Copyright 2007-2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /*
29 * Rasterization for binned triangles within a tile
30 */
31
32
33
/**
 * Prototype for an 8 plane rasterizer function. Will codegenerate
 * several of these.
 *
 * XXX: Variants for more/fewer planes.
 * XXX: Need ways of dropping planes as we descend.
 * XXX: SIMD
 */
/* Compute the coverage mask for one 4x4 pixel block by evaluating all
 * NR_PLANES edge functions at each pixel (or each sample position when
 * MULTISAMPLE is defined), then hand the surviving pixels to the shader.
 *
 * \param plane  the NR_PLANES edge-function planes for this triangle
 * \param x, y   framebuffer position of the block's top-left pixel
 * \param c      edge-function values at (x, y), one per plane
 */
static void
TAG(do_block_4)(struct lp_rasterizer_task *task,
                const struct lp_rast_triangle *tri,
                const struct lp_rast_plane *plane,
                int x, int y,
                const int64_t *c)
{
   int j;
#ifndef MULTISAMPLE
   /* One coverage bit per pixel of the 4x4 block. */
   unsigned mask = 0xffff;
#else
   /* 16 coverage bits per sample position, 4 samples -> 64 bits. */
   uint64_t mask = UINT64_MAX;
#endif

   for (j = 0; j < NR_PLANES; j++) {
#ifndef MULTISAMPLE
#ifdef RASTER_64
      /* Drop the FIXED_ORDER subpixel bits so the mask can be built with
       * narrower math; the -1 compensates for the flooring right-shift
       * (see the RASTER_64 comment in TAG(lp_rast_triangle)).
       */
      mask &= ~BUILD_MASK_LINEAR(((c[j] - 1) >> (int64_t)FIXED_ORDER),
                                 -plane[j].dcdx >> FIXED_ORDER,
                                 plane[j].dcdy >> FIXED_ORDER);
#else
      mask &= ~BUILD_MASK_LINEAR((c[j] - 1),
                                 -plane[j].dcdx,
                                 plane[j].dcdy);
#endif
#else
      /* Re-evaluate the edge function at each of the 4 sample offsets and
       * pack each per-sample 16-bit mask into its own 16-bit lane of the
       * 64-bit mask.
       */
      for (unsigned s = 0; s < 4; s++) {
         int64_t new_c = (c[j]) +
            ((IMUL64(task->scene->fixed_sample_pos[s][1], plane[j].dcdy) +
              IMUL64(task->scene->fixed_sample_pos[s][0], -plane[j].dcdx)) >> FIXED_ORDER);
         uint32_t build_mask;
#ifdef RASTER_64
         build_mask = BUILD_MASK_LINEAR((int32_t)((new_c - 1) >> (int64_t)FIXED_ORDER),
                                        -plane[j].dcdx >> FIXED_ORDER,
                                        plane[j].dcdy >> FIXED_ORDER);
#else
         build_mask = BUILD_MASK_LINEAR((new_c - 1),
                                        -plane[j].dcdx,
                                        plane[j].dcdy);
#endif
         mask &= ~((uint64_t)build_mask << (s * 16));
      }
#endif
   }

   /* Now pass to the shader:
    */
   if (mask)
      lp_rast_shade_quads_mask_sample(task, &tri->inputs, x, y, mask);
}
90
/**
 * Evaluate a 16x16 block of pixels to determine which 4x4 subblocks are in/out
 * of the triangle's bounds.  Fully-covered subblocks go straight to
 * block_full_4(); partially-covered ones descend into TAG(do_block_4).
 *
 * \param c  edge-function values at the block origin (x, y), one per plane
 */
static void
TAG(do_block_16)(struct lp_rasterizer_task *task,
                 const struct lp_rast_triangle *tri,
                 const struct lp_rast_plane *plane,
                 int x, int y,
                 const int64_t *c)
{
   unsigned outmask, inmask, partmask, partial_mask;
   unsigned j;

   outmask = 0;                 /* outside one or more trivial reject planes */
   partmask = 0;                /* outside one or more trivial accept planes */

   for (j = 0; j < NR_PLANES; j++) {
#ifdef RASTER_64
      /* Drop the FIXED_ORDER subpixel bits so 32-bit math suffices; see
       * the full explanation in TAG(lp_rast_triangle).  Step sizes here
       * are per 4-pixel subblock, hence the << 2.
       */
      int32_t dcdx = -plane[j].dcdx >> FIXED_ORDER;
      int32_t dcdy = plane[j].dcdy >> FIXED_ORDER;
      const int32_t cox = plane[j].eo >> FIXED_ORDER;
      const int32_t ei = (dcdy + dcdx - cox) << 2;
      const int32_t cox_s = cox << 2;
      const int32_t co = (int32_t)(c[j] >> (int64_t)FIXED_ORDER) + cox_s;
      int32_t cdiff;
      /* The ((c-1)>>n) - (c>>n) term is the fixup for the -1 adjustment
       * under the flooring right-shift (see TAG(lp_rast_triangle)).
       */
      cdiff = ei - cox_s + ((int32_t)((c[j] - 1) >> (int64_t)FIXED_ORDER) -
                            (int32_t)(c[j] >> (int64_t)FIXED_ORDER));
      dcdx <<= 2;
      dcdy <<= 2;
#else
      const int64_t dcdx = -IMUL64(plane[j].dcdx, 4);
      const int64_t dcdy = IMUL64(plane[j].dcdy, 4);
      const int64_t cox = IMUL64(plane[j].eo, 4);
      const int32_t ei = plane[j].dcdy - plane[j].dcdx - (int64_t)plane[j].eo;
      const int64_t cio = IMUL64(ei, 4) - 1;
      int32_t co, cdiff;
      co = c[j] + cox;
      cdiff = cio - cox;
#endif

      BUILD_MASKS(co, cdiff,
                  dcdx, dcdy,
                  &outmask,   /* sign bits from c[i][0..15] + cox */
                  &partmask); /* sign bits from c[i][0..15] + cio */
   }

   if (outmask == 0xffff)
      return;

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_4, util_bitcount(0xffff & ~(partial_mask | inmask)));

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;
      int64_t cx[NR_PLANES];

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_4);

      /* Translate each edge function to the 4x4 subblock's origin. */
      for (j = 0; j < NR_PLANES; j++)
         cx[j] = (c[j]
                  - IMUL64(plane[j].dcdx, ix)
                  + IMUL64(plane[j].dcdy, iy));

      TAG(do_block_4)(task, tri, plane, px, py, cx);
   }

   /* Iterate over fulls:
    */
   while (inmask) {
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;

      inmask &= ~(1 << i);

      LP_COUNT(nr_fully_covered_4);
      block_full_4(task, tri, px, py);
   }
}
191
192
/**
 * Scan the tile in chunks and figure out which pixels to rasterize
 * for this triangle.
 *
 * Classifies each 16x16 sub-block of the tile as trivially rejected,
 * trivially accepted (block_full_16), or partial (TAG(do_block_16)).
 */
void
TAG(lp_rast_triangle)(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   unsigned plane_mask = arg.triangle.plane_mask;
   const struct lp_rast_plane *tri_plane = GET_PLANES(tri);
   const int x = task->x, y = task->y;
   struct lp_rast_plane plane[NR_PLANES];
   int64_t c[NR_PLANES];
   unsigned outmask, inmask, partmask, partial_mask;
   unsigned j = 0;

   if (tri->inputs.disable) {
      /* This triangle was partially binned and has been disabled */
      return;
   }

   outmask = 0;                 /* outside one or more trivial reject planes */
   partmask = 0;                /* outside one or more trivial accept planes */

   /* Compact the active planes (set bits of plane_mask) into the local
    * plane[] array and evaluate each edge function at the tile origin.
    */
   while (plane_mask) {
      int i = ffs(plane_mask) - 1;
      plane[j] = tri_plane[i];
      plane_mask &= ~(1 << i);
      c[j] = plane[j].c + IMUL64(plane[j].dcdy, y) - IMUL64(plane[j].dcdx, x);

      {
#ifdef RASTER_64
         /*
          * Strip off lower FIXED_ORDER bits. Note that those bits from
          * dcdx, dcdy, eo are always 0 (by definition).
          * c values, however, are not. This means that for every
          * addition of the form c + n*dcdx the lower FIXED_ORDER bits will
          * NOT change. And those bits are not relevant to the sign bit (which
          * is only what we need!) that is,
          * sign(c + n*dcdx) == sign((c >> FIXED_ORDER) + n*(dcdx >> FIXED_ORDER))
          * This means we can get away with using 32bit math for the most part.
          * Only tricky part is the -1 adjustment for cdiff.
          */
         int32_t dcdx = -plane[j].dcdx >> FIXED_ORDER;
         int32_t dcdy = plane[j].dcdy >> FIXED_ORDER;
         const int32_t cox = plane[j].eo >> FIXED_ORDER;
         /* Step sizes here are per 16-pixel block, hence the << 4. */
         const int32_t ei = (dcdy + dcdx - cox) << 4;
         const int32_t cox_s = cox << 4;
         const int32_t co = (int32_t)(c[j] >> (int64_t)FIXED_ORDER) + cox_s;
         int32_t cdiff;
         /*
          * Plausibility check to ensure the 32bit math works.
          * Note that within a tile, the max we can move the edge function
          * is essentially dcdx * TILE_SIZE + dcdy * TILE_SIZE.
          * TILE_SIZE is 64, dcdx/dcdy are nominally 21 bit (for 8192 max size
          * and 8 subpixel bits), I'd be happy with 2 bits more too (1 for
          * increasing fb size to 16384, the required d3d11 value, another one
          * because I'm not quite sure we can't be _just_ above the max value
          * here). This gives us 30 bits max - hence if c would exceed that here
          * that means the plane is either trivial reject for the whole tile
          * (in which case the tri will not get binned), or trivial accept for
          * the whole tile (in which case plane_mask will not include it).
          */
         assert((c[j] >> (int64_t)FIXED_ORDER) > (int32_t)0xb0000000 &&
                (c[j] >> (int64_t)FIXED_ORDER) < (int32_t)0x3fffffff);
         /*
          * Note the fixup part is constant throughout the tile - thus could
          * just calculate this and avoid _all_ 64bit math in rasterization
          * (except exactly this fixup calc).
          * In fact theoretically could move that even to setup, albeit that
          * seems tricky (pre-bin certainly can have values larger than 32bit,
          * and would need to communicate that fixup value through).
          * And if we want to support msaa, we'd probably don't want to do the
          * downscaling in setup in any case...
          */
         cdiff = ei - cox_s + ((int32_t)((c[j] - 1) >> (int64_t)FIXED_ORDER) -
                               (int32_t)(c[j] >> (int64_t)FIXED_ORDER));
         dcdx <<= 4;
         dcdy <<= 4;
#else
         const int32_t dcdx = -plane[j].dcdx << 4;
         const int32_t dcdy = plane[j].dcdy << 4;
         const int32_t cox = plane[j].eo << 4;
         const int32_t ei = plane[j].dcdy - plane[j].dcdx - (int32_t)plane[j].eo;
         const int32_t cio = (ei << 4) - 1;
         int32_t co, cdiff;
         co = c[j] + cox;
         cdiff = cio - cox;
#endif
         BUILD_MASKS(co, cdiff,
                     dcdx, dcdy,
                     &outmask,   /* sign bits from c[i][0..15] + cox */
                     &partmask); /* sign bits from c[i][0..15] + cio */
      }

      j++;
   }

   if (outmask == 0xffff)
      return;

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_16, util_bitcount(0xffff & ~(partial_mask | inmask)));

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 16;
      int iy = (i >> 2) * 16;
      int px = x + ix;
      int py = y + iy;
      int64_t cx[NR_PLANES];

      /* Translate each edge function to the 16x16 block's origin. */
      for (j = 0; j < NR_PLANES; j++)
         cx[j] = (c[j]
                  - IMUL64(plane[j].dcdx, ix)
                  + IMUL64(plane[j].dcdy, iy));

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_16);
      TAG(do_block_16)(task, tri, plane, px, py, cx);
   }

   /* Iterate over fulls:
    */
   while (inmask) {
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 16;
      int iy = (i >> 2) * 16;
      int px = x + ix;
      int py = y + iy;

      inmask &= ~(1 << i);

      LP_COUNT(nr_fully_covered_16);
      block_full_16(task, tri, px, py);
   }
}
344
345 #if defined(PIPE_ARCH_SSE) && defined(TRI_16)
346 /* XXX: special case this when intersection is not required.
347 * - tile completely within bbox,
348 * - bbox completely within tile.
349 */
/* SSE path: rasterize a 16x16 region of the tile.
 *
 * NOTE: plane_mask is overloaded here — it carries the block position
 * within the tile (low byte = x, next byte = y), not a mask of planes.
 */
void
TRI_16(struct lp_rasterizer_task *task,
       const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned mask = arg.triangle.plane_mask;
   unsigned outmask, partial_mask;
   unsigned j;
   __m128i cstep4[NR_PLANES][4];

   /* Unpack the block position from the plane_mask argument. */
   int x = (mask & 0xff);
   int y = (mask >> 8);

   outmask = 0;                 /* outside one or more trivial reject planes */

   /* Pre-reject the 4x4 subblocks that fall off the right/bottom edge of
    * the 64x64 tile.
    */
   if (x + 12 >= 64) {
      int i = ((x + 12) - 64) / 4;
      outmask |= right_mask_tab[i];
   }

   if (y + 12 >= 64) {
      int i = ((y + 12) - 64) / 4;
      outmask |= bottom_mask_tab[i];
   }

   /* Convert tile-relative to framebuffer coordinates. */
   x += task->x;
   y += task->y;

   for (j = 0; j < NR_PLANES; j++) {
      /* Edge-function deltas per 4-pixel subblock step. */
      const int dcdx = -plane[j].dcdx * 4;
      const int dcdy = plane[j].dcdy * 4;
      __m128i xdcdy = _mm_set1_epi32(dcdy);

      /* cstep4[j][row] holds the deltas for one row of the 4x4 grid of
       * subblock corners, four corners per SSE register.
       */
      cstep4[j][0] = _mm_setr_epi32(0, dcdx, dcdx*2, dcdx*3);
      cstep4[j][1] = _mm_add_epi32(cstep4[j][0], xdcdy);
      cstep4[j][2] = _mm_add_epi32(cstep4[j][1], xdcdy);
      cstep4[j][3] = _mm_add_epi32(cstep4[j][2], xdcdy);

      {
         /* Edge function at the region origin, plus the trivial-reject
          * offset scaled to the 4-pixel step.
          */
         const int c = plane[j].c + plane[j].dcdy * y - plane[j].dcdx * x;
         const int cox = plane[j].eo * 4;

         outmask |= sign_bits4(cstep4[j], c + cox);
      }
   }

   if (outmask == 0xffff)
      return;


   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = 0xffff & ~outmask;

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;
      unsigned mask = 0xffff;   /* per-pixel coverage; shadows the outer mask */

      partial_mask &= ~(1 << i);

      for (j = 0; j < NR_PLANES; j++) {
         const int cx = (plane[j].c - 1
                         - plane[j].dcdx * px
                         + plane[j].dcdy * py) * 4;

         mask &= ~sign_bits4(cstep4[j], cx);
      }

      if (mask)
         lp_rast_shade_quads_mask(task, &tri->inputs, px, py, mask);
   }
}
430 #endif
431
432 #if defined(PIPE_ARCH_SSE) && defined(TRI_4)
/* SSE path: rasterize a single 4x4 pixel block.
 *
 * NOTE: plane_mask is overloaded here — it carries the block position
 * within the tile (low byte = x, next byte = y), not a mask of planes.
 */
void
TRI_4(struct lp_rasterizer_task *task,
      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned mask = arg.triangle.plane_mask;
   const int x = task->x + (mask & 0xff);
   const int y = task->y + (mask >> 8);
   unsigned j;

   /* Iterate over partials:
    */
   {
      unsigned mask = 0xffff;   /* per-pixel coverage; shadows the outer mask */

      for (j = 0; j < NR_PLANES; j++) {
         /* Edge function at the block origin. */
         const int cx = (plane[j].c
                         - plane[j].dcdx * x
                         + plane[j].dcdy * y);

         const int dcdx = -plane[j].dcdx;
         const int dcdy = plane[j].dcdy;
         __m128i xdcdy = _mm_set1_epi32(dcdy);

         /* Edge-function values for the 16 pixels of the 4x4 block, one
          * SSE register per row.
          */
         __m128i cstep0 = _mm_setr_epi32(cx, cx + dcdx, cx + dcdx*2, cx + dcdx*3);
         __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
         __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
         __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

         /* Saturating packs (32->16->8 bits) preserve the sign, which is
          * all we need for coverage.
          */
         __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
         __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);
         __m128i result = _mm_packs_epi16(cstep01, cstep23);

         /* Extract the sign bits
          */
         mask &= ~_mm_movemask_epi8(result);
      }

      if (mask)
         lp_rast_shade_quads_mask(task, &tri->inputs, x, y, mask);
   }
}
476 #endif
477
478
479
480 #undef TAG
481 #undef TRI_4
482 #undef TRI_16
483 #undef NR_PLANES
484
485