/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Rasterization for binned triangles within a tile
 */
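
/*
 * This header is a template: the .c file that includes it is expected to
 * define NR_PLANES and TAG() (and optionally TRI_4, TRI_16, MULTISAMPLE and
 * RASTER_64) before inclusion; NR_PLANES, TAG, TRI_4 and TRI_16 are #undef'd
 * again at the end of this file.  A hypothetical instantiation could look
 * like the sketch below (the actual macro values and the header's file name
 * are chosen by the including file):
 */
#if 0
#define NR_PLANES 3
#define TAG(x) x##_3
#include "this_template_header.h"   /* placeholder name for this file */
#endif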



/**
 * Prototype for an 8-plane rasterizer function.  Will code-generate
 * several of these.
 *
 * XXX: Variants for more/fewer planes.
 * XXX: Need ways of dropping planes as we descend.
 * XXX: SIMD
 */
static void
TAG(do_block_4)(struct lp_rasterizer_task *task,
                const struct lp_rast_triangle *tri,
                const struct lp_rast_plane *plane,
                int x, int y,
                const int64_t *c)
{
#ifndef MULTISAMPLE
   unsigned mask = 0xffff;
#else
   uint64_t mask = UINT64_MAX;
#endif
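   /*
    * Coverage mask layout: without MULTISAMPLE, one bit per pixel of the
    * 4x4 block (16 bits).  With MULTISAMPLE, four such 16-bit groups, one
    * per sample, shifted into place by s * 16 below.
    */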

   for (unsigned j = 0; j < NR_PLANES; j++) {
#ifndef MULTISAMPLE
#ifdef RASTER_64
      mask &= ~BUILD_MASK_LINEAR(((c[j] - 1) >> (int64_t)FIXED_ORDER),
                                 -plane[j].dcdx >> FIXED_ORDER,
                                 plane[j].dcdy >> FIXED_ORDER);
#else
      mask &= ~BUILD_MASK_LINEAR((c[j] - 1),
                                 -plane[j].dcdx,
                                 plane[j].dcdy);
#endif
#else
      for (unsigned s = 0; s < 4; s++) {
         int64_t new_c = (c[j]) + ((IMUL64(task->scene->fixed_sample_pos[s][1], plane[j].dcdy) +
                                    IMUL64(task->scene->fixed_sample_pos[s][0], -plane[j].dcdx)) >> FIXED_ORDER);
         uint32_t build_mask;
#ifdef RASTER_64
         build_mask = BUILD_MASK_LINEAR((int32_t)((new_c - 1) >> (int64_t)FIXED_ORDER),
                                        -plane[j].dcdx >> FIXED_ORDER,
                                        plane[j].dcdy >> FIXED_ORDER);
#else
         build_mask = BUILD_MASK_LINEAR((new_c - 1),
                                        -plane[j].dcdx,
                                        plane[j].dcdy);
#endif
         mask &= ~((uint64_t)build_mask << (s * 16));
      }
#endif
   }

   /* Now pass to the shader:
    */
   if (mask)
      lp_rast_shade_quads_mask_sample(task, &tri->inputs, x, y, mask);
}
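
/*
 * For reference, a scalar sketch of the kind of mask BUILD_MASK_LINEAR() is
 * assumed to produce above: bit (iy * 4 + ix) set when the edge value stepped
 * by ix * dcdx + iy * dcdy is negative.  The real macro is supplied by the
 * file including this template (typically a SIMD implementation).
 */
#if 0
static uint32_t
build_mask_linear_ref(int32_t c, int32_t dcdx, int32_t dcdy)
{
   uint32_t mask = 0;

   for (int iy = 0; iy < 4; iy++) {
      for (int ix = 0; ix < 4; ix++) {
         int32_t v = c + ix * dcdx + iy * dcdy;
         /* shift the sign bit down and into bit position iy*4 + ix */
         mask |= ((uint32_t)v >> 31) << (iy * 4 + ix);
      }
   }
   return mask;
}
#endif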


/**
 * Evaluate a 16x16 block of pixels to determine which 4x4 subblocks are in/out
 * of the triangle's bounds.
 */
static void
TAG(do_block_16)(struct lp_rasterizer_task *task,
                 const struct lp_rast_triangle *tri,
                 const struct lp_rast_plane *plane,
                 int x, int y,
                 const int64_t *c)
{
   unsigned outmask = 0;      /* outside one or more trivial reject planes */
   unsigned partmask = 0;     /* outside one or more trivial accept planes */

   for (unsigned j = 0; j < NR_PLANES; j++) {
#ifdef RASTER_64
      int32_t dcdx = -plane[j].dcdx >> FIXED_ORDER;
      int32_t dcdy = plane[j].dcdy >> FIXED_ORDER;
      const int32_t cox = plane[j].eo >> FIXED_ORDER;
      const int32_t ei = (dcdy + dcdx - cox) << 2;
      const int32_t cox_s = cox << 2;
      const int32_t co = (int32_t)(c[j] >> (int64_t)FIXED_ORDER) + cox_s;
      int32_t cdiff;
      cdiff = ei - cox_s + ((int32_t)((c[j] - 1) >> (int64_t)FIXED_ORDER) -
                            (int32_t)(c[j] >> (int64_t)FIXED_ORDER));
      dcdx <<= 2;
      dcdy <<= 2;
#else
      const int64_t dcdx = -IMUL64(plane[j].dcdx, 4);
      const int64_t dcdy = IMUL64(plane[j].dcdy, 4);
      const int64_t cox = IMUL64(plane[j].eo, 4);
      const int32_t ei = plane[j].dcdy - plane[j].dcdx - (int64_t)plane[j].eo;
      const int64_t cio = IMUL64(ei, 4) - 1;
      int32_t co, cdiff;
      co = c[j] + cox;
      cdiff = cio - cox;
#endif
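      /*
       * Note the *4 / <<2 scaling above: within a 16x16 block the test is
       * evaluated once per 4x4 sub-block, so the plane is stepped in units
       * of 4 pixels here (the tile walk in TAG(lp_rast_triangle) below uses
       * <<4, i.e. 16-pixel steps, for the same reason).
       */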

      BUILD_MASKS(co, cdiff,
                  dcdx, dcdy,
                  &outmask,   /* sign bits from c[i][0..15] + cox */
                  &partmask); /* sign bits from c[i][0..15] + cio */
   }

   if (outmask == 0xffff)
      return;

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   unsigned inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   unsigned partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_4, util_bitcount(0xffff & ~(partial_mask | inmask)));

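   /*
    * Bit i of the 16-bit masks corresponds to the 4x4 sub-block at offset
    * ((i & 3) * 4, (i >> 2) * 4) within this 16x16 block; e.g. bit 6 is the
    * sub-block at (8, 4).
    */
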
   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;
      int64_t cx[NR_PLANES];

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_4);

      for (unsigned j = 0; j < NR_PLANES; j++) {
         cx[j] = (c[j]
                  - IMUL64(plane[j].dcdx, ix)
                  + IMUL64(plane[j].dcdy, iy));
      }

      TAG(do_block_4)(task, tri, plane, px, py, cx);
   }

   /* Iterate over fulls:
    */
   while (inmask) {
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;

      inmask &= ~(1 << i);

      LP_COUNT(nr_fully_covered_4);
      block_full_4(task, tri, px, py);
   }
}


/**
 * Scan the tile in chunks and figure out which pixels to rasterize
 * for this triangle.
 */
void
TAG(lp_rast_triangle)(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   unsigned plane_mask = arg.triangle.plane_mask;
   const struct lp_rast_plane *tri_plane = GET_PLANES(tri);
   const int x = task->x, y = task->y;
   struct lp_rast_plane plane[NR_PLANES];
   int64_t c[NR_PLANES];
   unsigned outmask, inmask, partmask, partial_mask;
   unsigned j = 0;

   if (tri->inputs.disable) {
      /* This triangle was partially binned and has been disabled */
      return;
   }

   outmask = 0;                 /* outside one or more trivial reject planes */
   partmask = 0;                /* outside one or more trivial accept planes */

   while (plane_mask) {
      int i = ffs(plane_mask) - 1;
      plane[j] = tri_plane[i];
      plane_mask &= ~(1 << i);
      c[j] = plane[j].c + IMUL64(plane[j].dcdy, y) - IMUL64(plane[j].dcdx, x);

      {
#ifdef RASTER_64
         /*
          * Strip off the lower FIXED_ORDER bits. Note that those bits from
          * dcdx, dcdy and eo are always 0 (by definition).
          * c values, however, are not. This means that for every
          * addition of the form c + n*dcdx the lower FIXED_ORDER bits will
          * NOT change. And those bits are not relevant to the sign bit (which
          * is all we need!), that is,
          * sign(c + n*dcdx) == sign((c >> FIXED_ORDER) + n*(dcdx >> FIXED_ORDER)).
          * This means we can get away with using 32bit math for the most part.
          * The only tricky part is the -1 adjustment for cdiff.
          */
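         /*
          * Concretely: n*dcdx is a multiple of (1 << FIXED_ORDER), so
          * (c + n*dcdx) >> FIXED_ORDER == (c >> FIXED_ORDER) + n*(dcdx >> FIXED_ORDER)
          * holds exactly, and the arithmetic shift keeps the sign.  E.g. with
          * FIXED_ORDER == 8 (the usual 8 subpixel bits), c = -0x180 and
          * dcdx = 0x200: c + dcdx = 0x80 (non-negative), and
          * (c >> 8) + (dcdx >> 8) = -2 + 2 = 0 (also non-negative).
          */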
         int32_t dcdx = -plane[j].dcdx >> FIXED_ORDER;
         int32_t dcdy = plane[j].dcdy >> FIXED_ORDER;
         const int32_t cox = plane[j].eo >> FIXED_ORDER;
         const int32_t ei = (dcdy + dcdx - cox) << 4;
         const int32_t cox_s = cox << 4;
         const int32_t co = (int32_t)(c[j] >> (int64_t)FIXED_ORDER) + cox_s;
         int32_t cdiff;
         /*
          * Plausibility check to ensure the 32bit math works.
          * Note that within a tile, the max we can move the edge function
          * is essentially dcdx * TILE_SIZE + dcdy * TILE_SIZE.
          * TILE_SIZE is 64, dcdx/dcdy are nominally 21 bits (for 8192 max size
          * and 8 subpixel bits), and I'd be happy with 2 bits more too (1 for
          * increasing fb size to 16384, the required d3d11 value, another one
          * because I'm not quite sure we can't be _just_ above the max value
          * here). This gives us 30 bits max - hence if c exceeded that here,
          * it would mean the plane is either trivial reject for the whole tile
          * (in which case the tri will not get binned), or trivial accept for
          * the whole tile (in which case plane_mask will not include it).
          */
#if 0
         assert((c[j] >> (int64_t)FIXED_ORDER) > (int32_t)0xb0000000 &&
                (c[j] >> (int64_t)FIXED_ORDER) < (int32_t)0x3fffffff);
#endif
         /*
          * Note the fixup part is constant throughout the tile - thus we could
          * just calculate it once and avoid _all_ 64bit math in rasterization
          * (except exactly this fixup calc).
          * In fact we could theoretically even move that to setup, albeit that
          * seems tricky (pre-bin values can certainly be larger than 32bit,
          * and we would need to communicate that fixup value through).
          * And if we want to support msaa, we probably don't want to do the
          * downscaling in setup in any case...
          */
         cdiff = ei - cox_s + ((int32_t)((c[j] - 1) >> (int64_t)FIXED_ORDER) -
                               (int32_t)(c[j] >> (int64_t)FIXED_ORDER));
         dcdx <<= 4;
         dcdy <<= 4;
#else
         const int32_t dcdx = -plane[j].dcdx << 4;
         const int32_t dcdy = plane[j].dcdy << 4;
         const int32_t cox = plane[j].eo << 4;
         const int32_t ei = plane[j].dcdy - plane[j].dcdx - (int32_t)plane[j].eo;
         const int32_t cio = (ei << 4) - 1;
         int32_t co, cdiff;
         co = c[j] + cox;
         cdiff = cio - cox;
#endif
         BUILD_MASKS(co, cdiff,
                     dcdx, dcdy,
                     &outmask,   /* sign bits from c[i][0..15] + cox */
                     &partmask); /* sign bits from c[i][0..15] + cio */
      }

      j++;
   }

   if (outmask == 0xffff)
      return;

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_16, util_bitcount(0xffff & ~(partial_mask | inmask)));

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 16;
      int iy = (i >> 2) * 16;
      int px = x + ix;
      int py = y + iy;
      int64_t cx[NR_PLANES];

      for (j = 0; j < NR_PLANES; j++)
         cx[j] = (c[j]
                  - IMUL64(plane[j].dcdx, ix)
                  + IMUL64(plane[j].dcdy, iy));

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_16);
      TAG(do_block_16)(task, tri, plane, px, py, cx);
   }

   /* Iterate over fulls:
    */
   while (inmask) {
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 16;
      int iy = (i >> 2) * 16;
      int px = x + ix;
      int py = y + iy;

      inmask &= ~(1 << i);

      LP_COUNT(nr_fully_covered_16);
      block_full_16(task, tri, px, py);
   }
}


#if defined(PIPE_ARCH_SSE) && defined(TRI_16)
/* XXX: special case this when intersection is not required.
 *      - tile completely within bbox,
 *      - bbox completely within tile.
 */
void
TRI_16(struct lp_rasterizer_task *task,
       const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned mask = arg.triangle.plane_mask;
   __m128i cstep4[NR_PLANES][4];
   int x = (mask & 0xff);
   int y = (mask >> 8);
   unsigned outmask = 0;    /* outside one or more trivial reject planes */

   if (x + 12 >= 64) {
      int i = ((x + 12) - 64) / 4;
      outmask |= right_mask_tab[i];
   }

   if (y + 12 >= 64) {
      int i = ((y + 12) - 64) / 4;
      outmask |= bottom_mask_tab[i];
   }
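
   /*
    * For TRI_16 the plane_mask field is reused to carry the block position:
    * the low byte is the x offset and the next byte the y offset of this
    * 16x16 block within the 64x64 tile.  Blocks overlapping the right or
    * bottom tile edge get the overhanging 4x4 quads pre-rejected via
    * right_mask_tab[] / bottom_mask_tab[] above.
    */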

   x += task->x;
   y += task->y;

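   /*
    * cstep4[j] caches, for plane j, sixteen edge-function increments laid
    * out as a 4x4 grid: column steps of dcdx and row steps of dcdy, each
    * pre-scaled by 4.  sign_bits4() is assumed to add a scalar start value
    * and return the 16 resulting sign bits as a mask.
    */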
   for (unsigned j = 0; j < NR_PLANES; j++) {
      const int dcdx = -plane[j].dcdx * 4;
      const int dcdy = plane[j].dcdy * 4;
      __m128i xdcdy = _mm_set1_epi32(dcdy);

      cstep4[j][0] = _mm_setr_epi32(0, dcdx, dcdx*2, dcdx*3);
      cstep4[j][1] = _mm_add_epi32(cstep4[j][0], xdcdy);
      cstep4[j][2] = _mm_add_epi32(cstep4[j][1], xdcdy);
      cstep4[j][3] = _mm_add_epi32(cstep4[j][2], xdcdy);

      {
         const int c = plane[j].c + plane[j].dcdy * y - plane[j].dcdx * x;
         const int cox = plane[j].eo * 4;

         outmask |= sign_bits4(cstep4[j], c + cox);
      }
   }

   if (outmask == 0xffff)
      return;


   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   unsigned partial_mask = 0xffff & ~outmask;

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;
      unsigned mask = 0xffff;

      partial_mask &= ~(1 << i);

      for (unsigned j = 0; j < NR_PLANES; j++) {
         const int cx = (plane[j].c - 1
                         - plane[j].dcdx * px
                         + plane[j].dcdy * py) * 4;

         mask &= ~sign_bits4(cstep4[j], cx);
      }

      if (mask)
         lp_rast_shade_quads_mask(task, &tri->inputs, px, py, mask);
   }
}
#endif


#if defined(PIPE_ARCH_SSE) && defined(TRI_4)
void
TRI_4(struct lp_rasterizer_task *task,
      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   const int x = task->x + (arg.triangle.plane_mask & 0xff);
   const int y = task->y + (arg.triangle.plane_mask >> 8);

   /* Iterate over partials:
    */
   unsigned mask = 0xffff;

   for (unsigned j = 0; j < NR_PLANES; j++) {
      const int cx = (plane[j].c
                      - plane[j].dcdx * x
                      + plane[j].dcdy * y);

      const int dcdx = -plane[j].dcdx;
      const int dcdy = plane[j].dcdy;
      __m128i xdcdy = _mm_set1_epi32(dcdy);

      __m128i cstep0 = _mm_setr_epi32(cx, cx + dcdx, cx + dcdx*2, cx + dcdx*3);
      __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
      __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
      __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

460       __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);
461       __m128i result = _mm_packs_epi16(cstep01, cstep23);
462 
463       /* Extract the sign bits
464        */
465       mask &= ~_mm_movemask_epi8(result);
466    }
467 
468    if (mask)
469       lp_rast_shade_quads_mask(task, &tri->inputs, x, y, mask);
470 }
471 #endif


#undef TAG
#undef TRI_4
#undef TRI_16
#undef NR_PLANES