/**************************************************************************
 *
 * Copyright 2007-2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Rasterization for binned triangles within a tile
 */

#include <limits.h>
#include "util/u_math.h"
#include "lp_debug.h"
#include "lp_perf.h"
#include "lp_rast_priv.h"

/**
 * Shade all pixels in a 4x4 block.
 */
static void
block_full_4(struct lp_rasterizer_task *task,
             const struct lp_rast_triangle *tri,
             int x, int y)
{
   lp_rast_shade_quads_all(task, &tri->inputs, x, y);
}


/**
 * Shade all pixels in a 16x16 block.
 */
static void
block_full_16(struct lp_rasterizer_task *task,
              const struct lp_rast_triangle *tri,
              int x, int y)
{
   unsigned ix, iy;
   assert(x % 16 == 0);
   assert(y % 16 == 0);
   for (iy = 0; iy < 16; iy += 4)
      for (ix = 0; ix < 16; ix += 4)
         block_full_4(task, tri, x + ix, y + iy);
}

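/**
 * Compute a 16-bit coverage mask for a 4x4 pixel block from one edge
 * function.  c is the edge function value at the first pixel, dcdx/dcdy
 * the per-pixel steps in x/y.  Bit i of the result is set when pixel i
 * (row-major) lies outside the edge, i.e. its edge function value is
 * negative - hence the arithmetic shift to replicate the sign bit.
 */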
static inline unsigned
build_mask_linear(int32_t c, int32_t dcdx, int32_t dcdy)
{
   unsigned mask = 0;

   int32_t c0 = c;
   int32_t c1 = c0 + dcdy;
   int32_t c2 = c1 + dcdy;
   int32_t c3 = c2 + dcdy;

   mask |= ((c0 + 0 * dcdx) >> 31) & (1 << 0);
   mask |= ((c0 + 1 * dcdx) >> 31) & (1 << 1);
   mask |= ((c0 + 2 * dcdx) >> 31) & (1 << 2);
   mask |= ((c0 + 3 * dcdx) >> 31) & (1 << 3);
   mask |= ((c1 + 0 * dcdx) >> 31) & (1 << 4);
   mask |= ((c1 + 1 * dcdx) >> 31) & (1 << 5);
   mask |= ((c1 + 2 * dcdx) >> 31) & (1 << 6);
   mask |= ((c1 + 3 * dcdx) >> 31) & (1 << 7);
   mask |= ((c2 + 0 * dcdx) >> 31) & (1 << 8);
   mask |= ((c2 + 1 * dcdx) >> 31) & (1 << 9);
   mask |= ((c2 + 2 * dcdx) >> 31) & (1 << 10);
   mask |= ((c2 + 3 * dcdx) >> 31) & (1 << 11);
   mask |= ((c3 + 0 * dcdx) >> 31) & (1 << 12);
   mask |= ((c3 + 1 * dcdx) >> 31) & (1 << 13);
   mask |= ((c3 + 2 * dcdx) >> 31) & (1 << 14);
   mask |= ((c3 + 3 * dcdx) >> 31) & (1 << 15);

   return mask;
}


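/**
 * Accumulate masks for a 4x4 block against one plane: bits of *outmask
 * are set where the edge function value c is negative, bits of
 * *partmask where c + cdiff is negative.  The template code included at
 * the bottom of this file uses the two masks to tell fully-outside
 * blocks apart from partially covered ones.
 */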
static inline void
build_masks(int32_t c,
            int32_t cdiff,
            int32_t dcdx,
            int32_t dcdy,
            unsigned *outmask,
            unsigned *partmask)
{
   *outmask |= build_mask_linear(c, dcdx, dcdy);
   *partmask |= build_mask_linear(c + cdiff, dcdx, dcdy);
}

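/*
 * Entrypoints for triangles that fit in a single 16x16 (or 4x4) region
 * of the tile.  For these commands the plane_mask argument carries the
 * region's position within the tile rather than an actual plane mask
 * (the SSE/VMX implementations below decode x/y from it), so these
 * generic fallbacks just restore a full plane mask and hand the
 * triangle to the general rasterizer.
 */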
void
lp_rast_triangle_3_16(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<3)-1;
   lp_rast_triangle_3(task, arg2);
}

void
lp_rast_triangle_3_4(struct lp_rasterizer_task *task,
                     const union lp_rast_cmd_arg arg)
{
   lp_rast_triangle_3_16(task, arg);
}

void
lp_rast_triangle_4_16(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<4)-1;
   lp_rast_triangle_4(task, arg2);
}

void
lp_rast_triangle_ms_3_16(struct lp_rasterizer_task *task,
                         const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<3)-1;
   lp_rast_triangle_ms_3(task, arg2);
}

void
lp_rast_triangle_ms_3_4(struct lp_rasterizer_task *task,
                        const union lp_rast_cmd_arg arg)
{
   lp_rast_triangle_ms_3_16(task, arg);
}

void
lp_rast_triangle_ms_4_16(struct lp_rasterizer_task *task,
                         const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<4)-1;
   lp_rast_triangle_ms_4(task, arg2);
}

#if defined(PIPE_ARCH_SSE)

#include <emmintrin.h>
#include "util/u_sse.h"


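/*
 * SSE2 version of build_masks().  The 16 edge function values are
 * narrowed with saturating packs (which preserve the sign) from epi32
 * down to epi8, so a single movemask yields the 16-bit mask.
 */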
static inline void
build_masks_sse(int c,
                int cdiff,
                int dcdx,
                int dcdy,
                unsigned *outmask,
                unsigned *partmask)
{
   __m128i cstep0 = _mm_setr_epi32(c, c+dcdx, c+dcdx*2, c+dcdx*3);
   __m128i xdcdy = _mm_set1_epi32(dcdy);

   /* Get values across the quad
    */
   __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
   __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
   __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

   {
      __m128i cstep01, cstep23, result;

      cstep01 = _mm_packs_epi32(cstep0, cstep1);
      cstep23 = _mm_packs_epi32(cstep2, cstep3);
      result = _mm_packs_epi16(cstep01, cstep23);

      *outmask |= _mm_movemask_epi8(result);
   }

   {
      __m128i cio4 = _mm_set1_epi32(cdiff);
      __m128i cstep01, cstep23, result;

      cstep0 = _mm_add_epi32(cstep0, cio4);
      cstep1 = _mm_add_epi32(cstep1, cio4);
      cstep2 = _mm_add_epi32(cstep2, cio4);
      cstep3 = _mm_add_epi32(cstep3, cio4);

      cstep01 = _mm_packs_epi32(cstep0, cstep1);
      cstep23 = _mm_packs_epi32(cstep2, cstep3);
      result = _mm_packs_epi16(cstep01, cstep23);

      *partmask |= _mm_movemask_epi8(result);
   }
}


static inline unsigned
build_mask_linear_sse(int c, int dcdx, int dcdy)
{
   __m128i cstep0 = _mm_setr_epi32(c, c+dcdx, c+dcdx*2, c+dcdx*3);
   __m128i xdcdy = _mm_set1_epi32(dcdy);

   /* Get values across the quad
    */
   __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
   __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
   __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

   /* pack pairs of results into epi16
    */
   __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
   __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);

   /* pack into epi8, preserving sign bits
    */
   __m128i result = _mm_packs_epi16(cstep01, cstep23);

   /* extract sign bits to create mask
    */
   return _mm_movemask_epi8(result);
}

static inline unsigned
sign_bits4(const __m128i *cstep, int cdiff)
{
   /* Adjust the step values
    */
   __m128i cio4 = _mm_set1_epi32(cdiff);
   __m128i cstep0 = _mm_add_epi32(cstep[0], cio4);
   __m128i cstep1 = _mm_add_epi32(cstep[1], cio4);
   __m128i cstep2 = _mm_add_epi32(cstep[2], cio4);
   __m128i cstep3 = _mm_add_epi32(cstep[3], cio4);

   /* Pack down to epi8
    */
   __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
   __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);
   __m128i result = _mm_packs_epi16(cstep01, cstep23);

   /* Extract the sign bits
    */
   return _mm_movemask_epi8(result);
}


#define NR_PLANES 3

void
lp_rast_triangle_32_3_16(struct lp_rasterizer_task *task,
                         const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   int x = (arg.triangle.plane_mask & 0xff) + task->x;
   int y = (arg.triangle.plane_mask >> 8) + task->y;
   unsigned i, j;

   struct { unsigned mask:16; unsigned i:8; unsigned j:8; } out[16];
   unsigned nr = 0;

   /* p0 and p2 are aligned, p1 is not (plane size 24 bytes). */
   __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* clo, chi, dcdx, dcdy */
   __m128i p1 = _mm_loadu_si128((__m128i *)&plane[1]);
   __m128i p2 = _mm_load_si128((__m128i *)&plane[2]);
   __m128i zero = _mm_setzero_si128();

   __m128i c, dcdx, dcdy, rej4;
   __m128i dcdx_neg_mask, dcdy_neg_mask;
   __m128i dcdx2, dcdx3;

   __m128i span_0;                /* 0,dcdx,2dcdx,3dcdx for plane 0 */
   __m128i span_1;                /* 0,dcdx,2dcdx,3dcdx for plane 1 */
   __m128i span_2;                /* 0,dcdx,2dcdx,3dcdx for plane 2 */
   __m128i unused;

   transpose4_epi32(&p0, &p1, &p2, &zero,
                    &c, &unused, &dcdx, &dcdy);

   /* Recalculate the reject-corner offset, eo = max(dcdy, 0) - min(dcdx, 0),
    * in SIMD - easier than trying to load it as scalars / shuffle.
    */
   dcdx_neg_mask = _mm_srai_epi32(dcdx, 31);
   dcdy_neg_mask = _mm_srai_epi32(dcdy, 31);
   rej4 = _mm_sub_epi32(_mm_andnot_si128(dcdy_neg_mask, dcdy),
                        _mm_and_si128(dcdx_neg_mask, dcdx));

   /* Adjust dcdx - each step in +x advances the edge functions by -dcdx.
    */
   dcdx = _mm_sub_epi32(zero, dcdx);

   /* Step the edge functions to the block position (x, y) and scale the
    * reject offset up to a 4-pixel step.
    */
   c = _mm_add_epi32(c, mm_mullo_epi32(dcdx, _mm_set1_epi32(x)));
   c = _mm_add_epi32(c, mm_mullo_epi32(dcdy, _mm_set1_epi32(y)));
   rej4 = _mm_slli_epi32(rej4, 2);

   /* Adjust so we can just check the sign bit (< 0 comparison), instead
    * of having to do a less efficient <= 0 comparison.
    */
   c = _mm_sub_epi32(c, _mm_set1_epi32(1));
   rej4 = _mm_add_epi32(rej4, _mm_set1_epi32(1));

   dcdx2 = _mm_add_epi32(dcdx, dcdx);
   dcdx3 = _mm_add_epi32(dcdx2, dcdx);

   transpose4_epi32(&zero, &dcdx, &dcdx2, &dcdx3,
                    &span_0, &span_1, &span_2, &unused);

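   /* Walk the 16x16 area as a 4x4 grid of 4x4 blocks.  cx holds the three
    * edge function values at the current block's origin; adding rej4 moves
    * them to the block's trivial-reject corner, so a clear sign bit on all
    * three planes means the block may be at least partially covered.  Such
    * blocks get a full 16-bit coverage mask computed and, unless the mask
    * says "all pixels outside", are queued in out[] for shading below.
    */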
   for (i = 0; i < 4; i++) {
      __m128i cx = c;

      for (j = 0; j < 4; j++) {
         __m128i c4rej = _mm_add_epi32(cx, rej4);
         __m128i rej_masks = _mm_srai_epi32(c4rej, 31);

         /* if (is_zero(rej_masks)) */
         if (_mm_movemask_epi8(rej_masks) == 0) {
            __m128i c0_0 = _mm_add_epi32(SCALAR_EPI32(cx, 0), span_0);
            __m128i c1_0 = _mm_add_epi32(SCALAR_EPI32(cx, 1), span_1);
            __m128i c2_0 = _mm_add_epi32(SCALAR_EPI32(cx, 2), span_2);

            __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0);

            __m128i c0_1 = _mm_add_epi32(c0_0, SCALAR_EPI32(dcdy, 0));
            __m128i c1_1 = _mm_add_epi32(c1_0, SCALAR_EPI32(dcdy, 1));
            __m128i c2_1 = _mm_add_epi32(c2_0, SCALAR_EPI32(dcdy, 2));

            __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1);
            __m128i c_01 = _mm_packs_epi32(c_0, c_1);

            __m128i c0_2 = _mm_add_epi32(c0_1, SCALAR_EPI32(dcdy, 0));
            __m128i c1_2 = _mm_add_epi32(c1_1, SCALAR_EPI32(dcdy, 1));
            __m128i c2_2 = _mm_add_epi32(c2_1, SCALAR_EPI32(dcdy, 2));

            __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2);

            __m128i c0_3 = _mm_add_epi32(c0_2, SCALAR_EPI32(dcdy, 0));
            __m128i c1_3 = _mm_add_epi32(c1_2, SCALAR_EPI32(dcdy, 1));
            __m128i c2_3 = _mm_add_epi32(c2_2, SCALAR_EPI32(dcdy, 2));

            __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3);
            __m128i c_23 = _mm_packs_epi32(c_2, c_3);
            __m128i c_0123 = _mm_packs_epi16(c_01, c_23);

            unsigned mask = _mm_movemask_epi8(c_0123);

            out[nr].i = i;
            out[nr].j = j;
            out[nr].mask = mask;
            if (mask != 0xffff)
               nr++;
         }
         cx = _mm_add_epi32(cx, _mm_slli_epi32(dcdx, 2));
      }

      c = _mm_add_epi32(c, _mm_slli_epi32(dcdy, 2));
   }

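   /* Shade the partially covered blocks recorded above.  The mask bits
    * are set for pixels *outside* the triangle, so invert for shading.
    */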
   for (i = 0; i < nr; i++)
      lp_rast_shade_quads_mask(task,
                               &tri->inputs,
                               x + 4 * out[i].j,
                               y + 4 * out[i].i,
                               0xffff & ~out[i].mask);
}

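/*
 * Variant of the above for a triangle contained in a single 4x4 block:
 * no block scan is needed, just one coverage mask.  plane_mask again
 * carries the block position within the tile.
 */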
void
lp_rast_triangle_32_3_4(struct lp_rasterizer_task *task,
                        const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned x = (arg.triangle.plane_mask & 0xff) + task->x;
   unsigned y = (arg.triangle.plane_mask >> 8) + task->y;

   /* p0 and p2 are aligned, p1 is not (plane size 24 bytes). */
   __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* clo, chi, dcdx, dcdy */
   __m128i p1 = _mm_loadu_si128((__m128i *)&plane[1]);
   __m128i p2 = _mm_load_si128((__m128i *)&plane[2]);
   __m128i zero = _mm_setzero_si128();

   __m128i c, dcdx, dcdy;
   __m128i dcdx2, dcdx3;

   __m128i span_0;                /* 0,dcdx,2dcdx,3dcdx for plane 0 */
   __m128i span_1;                /* 0,dcdx,2dcdx,3dcdx for plane 1 */
   __m128i span_2;                /* 0,dcdx,2dcdx,3dcdx for plane 2 */
   __m128i unused;

   transpose4_epi32(&p0, &p1, &p2, &zero,
                    &c, &unused, &dcdx, &dcdy);

   /* Adjust dcdx - each step in +x advances the edge functions by -dcdx.
    */
   dcdx = _mm_sub_epi32(zero, dcdx);

   c = _mm_add_epi32(c, mm_mullo_epi32(dcdx, _mm_set1_epi32(x)));
   c = _mm_add_epi32(c, mm_mullo_epi32(dcdy, _mm_set1_epi32(y)));

   /* Adjust so we can just check the sign bit (< 0 comparison), instead
    * of having to do a less efficient <= 0 comparison.
    */
   c = _mm_sub_epi32(c, _mm_set1_epi32(1));

   dcdx2 = _mm_add_epi32(dcdx, dcdx);
   dcdx3 = _mm_add_epi32(dcdx2, dcdx);

   transpose4_epi32(&zero, &dcdx, &dcdx2, &dcdx3,
                    &span_0, &span_1, &span_2, &unused);

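   /* Evaluate all three edge functions across the 4x4 block and pack
    * the sign bits into a single 16-bit coverage mask.
    */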
   {
      __m128i c0_0 = _mm_add_epi32(SCALAR_EPI32(c, 0), span_0);
      __m128i c1_0 = _mm_add_epi32(SCALAR_EPI32(c, 1), span_1);
      __m128i c2_0 = _mm_add_epi32(SCALAR_EPI32(c, 2), span_2);

      __m128i c_0 = _mm_or_si128(_mm_or_si128(c0_0, c1_0), c2_0);

      __m128i c0_1 = _mm_add_epi32(c0_0, SCALAR_EPI32(dcdy, 0));
      __m128i c1_1 = _mm_add_epi32(c1_0, SCALAR_EPI32(dcdy, 1));
      __m128i c2_1 = _mm_add_epi32(c2_0, SCALAR_EPI32(dcdy, 2));

      __m128i c_1 = _mm_or_si128(_mm_or_si128(c0_1, c1_1), c2_1);
      __m128i c_01 = _mm_packs_epi32(c_0, c_1);

      __m128i c0_2 = _mm_add_epi32(c0_1, SCALAR_EPI32(dcdy, 0));
      __m128i c1_2 = _mm_add_epi32(c1_1, SCALAR_EPI32(dcdy, 1));
      __m128i c2_2 = _mm_add_epi32(c2_1, SCALAR_EPI32(dcdy, 2));

      __m128i c_2 = _mm_or_si128(_mm_or_si128(c0_2, c1_2), c2_2);

      __m128i c0_3 = _mm_add_epi32(c0_2, SCALAR_EPI32(dcdy, 0));
      __m128i c1_3 = _mm_add_epi32(c1_2, SCALAR_EPI32(dcdy, 1));
      __m128i c2_3 = _mm_add_epi32(c2_2, SCALAR_EPI32(dcdy, 2));

      __m128i c_3 = _mm_or_si128(_mm_or_si128(c0_3, c1_3), c2_3);
      __m128i c_23 = _mm_packs_epi32(c_2, c_3);
      __m128i c_0123 = _mm_packs_epi16(c_01, c_23);

      unsigned mask = _mm_movemask_epi8(c_0123);

      if (mask != 0xffff)
         lp_rast_shade_quads_mask(task,
                                  &tri->inputs,
                                  x,
                                  y,
                                  0xffff & ~mask);
   }
}

#undef NR_PLANES

#else

#if defined(_ARCH_PWR8) && UTIL_ARCH_LITTLE_ENDIAN

#include <altivec.h>
#include "util/u_pwr8.h"

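/*
 * POWER8/VMX versions of the mask builders, mirroring the SSE path
 * above via the wrappers in u_pwr8.h.
 */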
static inline void
build_masks_ppc(int c,
                int cdiff,
                int dcdx,
                int dcdy,
                unsigned *outmask,
                unsigned *partmask)
{
   __m128i cstep0 = vec_setr_epi32(c, c+dcdx, c+dcdx*2, c+dcdx*3);
   __m128i xdcdy = (__m128i) vec_splats(dcdy);

   /* Get values across the quad
    */
   __m128i cstep1 = vec_add_epi32(cstep0, xdcdy);
   __m128i cstep2 = vec_add_epi32(cstep1, xdcdy);
   __m128i cstep3 = vec_add_epi32(cstep2, xdcdy);

   {
      __m128i cstep01, cstep23, result;

      cstep01 = vec_packs_epi32(cstep0, cstep1);
      cstep23 = vec_packs_epi32(cstep2, cstep3);
      result = vec_packs_epi16(cstep01, cstep23);

      *outmask |= vec_movemask_epi8(result);
   }

   {
      __m128i cio4 = (__m128i) vec_splats(cdiff);
      __m128i cstep01, cstep23, result;

      cstep0 = vec_add_epi32(cstep0, cio4);
      cstep1 = vec_add_epi32(cstep1, cio4);
      cstep2 = vec_add_epi32(cstep2, cio4);
      cstep3 = vec_add_epi32(cstep3, cio4);

      cstep01 = vec_packs_epi32(cstep0, cstep1);
      cstep23 = vec_packs_epi32(cstep2, cstep3);
      result = vec_packs_epi16(cstep01, cstep23);

      *partmask |= vec_movemask_epi8(result);
   }
}

static inline unsigned
build_mask_linear_ppc(int c, int dcdx, int dcdy)
{
   __m128i cstep0 = vec_setr_epi32(c, c+dcdx, c+dcdx*2, c+dcdx*3);
   __m128i xdcdy = (__m128i) vec_splats(dcdy);

   /* Get values across the quad
    */
   __m128i cstep1 = vec_add_epi32(cstep0, xdcdy);
   __m128i cstep2 = vec_add_epi32(cstep1, xdcdy);
   __m128i cstep3 = vec_add_epi32(cstep2, xdcdy);

   /* pack pairs of results into epi16
    */
   __m128i cstep01 = vec_packs_epi32(cstep0, cstep1);
   __m128i cstep23 = vec_packs_epi32(cstep2, cstep3);

   /* pack into epi8, preserving sign bits
    */
   __m128i result = vec_packs_epi16(cstep01, cstep23);

   /* extract sign bits to create mask
    */
   return vec_movemask_epi8(result);
}

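/*
 * Gather one plane's c, dcdx, dcdy and eo fields into a vector.
 */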
static inline __m128i
lp_plane_to_m128i(const struct lp_rast_plane *plane)
{
   return vec_setr_epi32((int32_t)plane->c, (int32_t)plane->dcdx,
                         (int32_t)plane->dcdy, (int32_t)plane->eo);
}

#define NR_PLANES 3

void
lp_rast_triangle_32_3_16(struct lp_rasterizer_task *task,
                         const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   int x = (arg.triangle.plane_mask & 0xff) + task->x;
   int y = (arg.triangle.plane_mask >> 8) + task->y;
   unsigned i, j;

   struct { unsigned mask:16; unsigned i:8; unsigned j:8; } out[16];
   unsigned nr = 0;

   __m128i p0 = lp_plane_to_m128i(&plane[0]); /* c, dcdx, dcdy, eo */
   __m128i p1 = lp_plane_to_m128i(&plane[1]); /* c, dcdx, dcdy, eo */
   __m128i p2 = lp_plane_to_m128i(&plane[2]); /* c, dcdx, dcdy, eo */
   __m128i zero = vec_splats((unsigned char) 0);

   __m128i c;
   __m128i dcdx;
   __m128i dcdy;
   __m128i rej4;

   __m128i dcdx2;
   __m128i dcdx3;

   __m128i span_0;                /* 0,dcdx,2dcdx,3dcdx for plane 0 */
   __m128i span_1;                /* 0,dcdx,2dcdx,3dcdx for plane 1 */
   __m128i span_2;                /* 0,dcdx,2dcdx,3dcdx for plane 2 */
   __m128i unused;

   __m128i vshuf_mask0;
   __m128i vshuf_mask1;
   __m128i vshuf_mask2;

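   /*
    * vec_perm() control masks that broadcast element 0, 1 or 2 of a
    * vector to all four lanes - the VMX counterpart of SCALAR_EPI32 in
    * the SSE path.  The byte patterns depend on endianness.
    */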
#if UTIL_ARCH_LITTLE_ENDIAN
   vshuf_mask0 = (__m128i) vec_splats((unsigned int) 0x03020100);
   vshuf_mask1 = (__m128i) vec_splats((unsigned int) 0x07060504);
   vshuf_mask2 = (__m128i) vec_splats((unsigned int) 0x0B0A0908);
#else
   vshuf_mask0 = (__m128i) vec_splats((unsigned int) 0x0C0D0E0F);
   vshuf_mask1 = (__m128i) vec_splats((unsigned int) 0x08090A0B);
   vshuf_mask2 = (__m128i) vec_splats((unsigned int) 0x04050607);
#endif

   transpose4_epi32(&p0, &p1, &p2, &zero,
                    &c, &dcdx, &dcdy, &rej4);

   /* Adjust dcdx - each step in +x advances the edge functions by -dcdx.
    */
   dcdx = vec_sub_epi32(zero, dcdx);

   c = vec_add_epi32(c, vec_mullo_epi32(dcdx, (__m128i) vec_splats(x)));
   c = vec_add_epi32(c, vec_mullo_epi32(dcdy, (__m128i) vec_splats(y)));
   rej4 = vec_slli_epi32(rej4, 2);

   /*
    * Adjust so we can just check the sign bit (< 0 comparison),
    * instead of having to do a less efficient <= 0 comparison
    */
   c = vec_sub_epi32(c, (__m128i) vec_splats((unsigned int) 1));
   rej4 = vec_add_epi32(rej4, (__m128i) vec_splats((unsigned int) 1));

   dcdx2 = vec_add_epi32(dcdx, dcdx);
   dcdx3 = vec_add_epi32(dcdx2, dcdx);

   transpose4_epi32(&zero, &dcdx, &dcdx2, &dcdx3,
                    &span_0, &span_1, &span_2, &unused);

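   /* Same block scan as in the SSE path above: trivially reject 4x4
    * blocks with rej4, compute coverage masks for the remainder and
    * queue the partially covered ones in out[].
    */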
   for (i = 0; i < 4; i++) {
      __m128i cx = c;

      for (j = 0; j < 4; j++) {
         __m128i c4rej = vec_add_epi32(cx, rej4);
         __m128i rej_masks = vec_srai_epi32(c4rej, 31);

         /* if (is_zero(rej_masks)) */
         if (vec_movemask_epi8(rej_masks) == 0) {
            __m128i c0_0 = vec_add_epi32(vec_perm(cx, cx, vshuf_mask0), span_0);
            __m128i c1_0 = vec_add_epi32(vec_perm(cx, cx, vshuf_mask1), span_1);
            __m128i c2_0 = vec_add_epi32(vec_perm(cx, cx, vshuf_mask2), span_2);

            __m128i c_0 = vec_or(vec_or(c0_0, c1_0), c2_0);

            __m128i c0_1 = vec_add_epi32(c0_0, vec_perm(dcdy, dcdy, vshuf_mask0));
            __m128i c1_1 = vec_add_epi32(c1_0, vec_perm(dcdy, dcdy, vshuf_mask1));
            __m128i c2_1 = vec_add_epi32(c2_0, vec_perm(dcdy, dcdy, vshuf_mask2));

            __m128i c_1 = vec_or(vec_or(c0_1, c1_1), c2_1);
            __m128i c_01 = vec_packs_epi32(c_0, c_1);

            __m128i c0_2 = vec_add_epi32(c0_1, vec_perm(dcdy, dcdy, vshuf_mask0));
            __m128i c1_2 = vec_add_epi32(c1_1, vec_perm(dcdy, dcdy, vshuf_mask1));
            __m128i c2_2 = vec_add_epi32(c2_1, vec_perm(dcdy, dcdy, vshuf_mask2));

            __m128i c_2 = vec_or(vec_or(c0_2, c1_2), c2_2);

            __m128i c0_3 = vec_add_epi32(c0_2, vec_perm(dcdy, dcdy, vshuf_mask0));
            __m128i c1_3 = vec_add_epi32(c1_2, vec_perm(dcdy, dcdy, vshuf_mask1));
            __m128i c2_3 = vec_add_epi32(c2_2, vec_perm(dcdy, dcdy, vshuf_mask2));

            __m128i c_3 = vec_or(vec_or(c0_3, c1_3), c2_3);
            __m128i c_23 = vec_packs_epi32(c_2, c_3);
            __m128i c_0123 = vec_packs_epi16(c_01, c_23);

            unsigned mask = vec_movemask_epi8(c_0123);

            out[nr].i = i;
            out[nr].j = j;
            out[nr].mask = mask;
            if (mask != 0xffff)
               nr++;
         }
         cx = vec_add_epi32(cx, vec_slli_epi32(dcdx, 2));
      }

      c = vec_add_epi32(c, vec_slli_epi32(dcdy, 2));
   }

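   /* Shade the partially covered blocks recorded above, inverting the
    * masks from "outside" to "inside" bits.
    */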
   for (i = 0; i < nr; i++)
      lp_rast_shade_quads_mask(task,
                               &tri->inputs,
                               x + 4 * out[i].j,
                               y + 4 * out[i].i,
                               0xffff & ~out[i].mask);
}

#undef NR_PLANES

#else

void
lp_rast_triangle_32_3_16(struct lp_rasterizer_task *task,
                         const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<3)-1;
   lp_rast_triangle_32_3(task, arg2);
}

#endif /* _ARCH_PWR8 && UTIL_ARCH_LITTLE_ENDIAN */

void
lp_rast_triangle_32_4_16(struct lp_rasterizer_task *task,
                         const union lp_rast_cmd_arg arg)
{
   union lp_rast_cmd_arg arg2;
   arg2.triangle.tri = arg.triangle.tri;
   arg2.triangle.plane_mask = (1<<4)-1;
   lp_rast_triangle_32_4(task, arg2);
}

void
lp_rast_triangle_32_3_4(struct lp_rasterizer_task *task,
                        const union lp_rast_cmd_arg arg)
{
   lp_rast_triangle_32_3_16(task, arg);
}

#endif

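/* Pick the fastest available mask builders for the template code below. */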
#if defined(PIPE_ARCH_SSE)
#define BUILD_MASKS(c, cdiff, dcdx, dcdy, omask, pmask) build_masks_sse((int)c, (int)cdiff, dcdx, dcdy, omask, pmask)
#define BUILD_MASK_LINEAR(c, dcdx, dcdy) build_mask_linear_sse((int)c, dcdx, dcdy)
#elif (defined(_ARCH_PWR8) && UTIL_ARCH_LITTLE_ENDIAN)
#define BUILD_MASKS(c, cdiff, dcdx, dcdy, omask, pmask) build_masks_ppc((int)c, (int)cdiff, dcdx, dcdy, omask, pmask)
#define BUILD_MASK_LINEAR(c, dcdx, dcdy) build_mask_linear_ppc((int)c, dcdx, dcdy)
#else
#define BUILD_MASKS(c, cdiff, dcdx, dcdy, omask, pmask) build_masks(c, cdiff, dcdx, dcdy, omask, pmask)
#define BUILD_MASK_LINEAR(c, dcdx, dcdy) build_mask_linear(c, dcdx, dcdy)
#endif

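/*
 * Instantiate the generic rasterizer template once per plane count:
 * first the 64-bit (RASTER_64) variants for 1..8 planes, then the
 * 32-bit variants, then the multisample 64-bit variants.
 */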
#define RASTER_64 1

#define TAG(x) x##_1
#define NR_PLANES 1
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_2
#define NR_PLANES 2
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_3
#define NR_PLANES 3
/*#define TRI_4 lp_rast_triangle_3_4*/
/*#define TRI_16 lp_rast_triangle_3_16*/
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_4
#define NR_PLANES 4
/*#define TRI_16 lp_rast_triangle_4_16*/
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_5
#define NR_PLANES 5
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_6
#define NR_PLANES 6
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_7
#define NR_PLANES 7
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_8
#define NR_PLANES 8
#include "lp_rast_tri_tmp.h"

#undef RASTER_64

#define TAG(x) x##_32_1
#define NR_PLANES 1
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_32_2
#define NR_PLANES 2
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_32_3
#define NR_PLANES 3
/*#define TRI_4 lp_rast_triangle_3_4*/
/*#define TRI_16 lp_rast_triangle_3_16*/
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_32_4
#define NR_PLANES 4
#ifdef PIPE_ARCH_SSE
#define TRI_16 lp_rast_triangle_32_4_16
#endif
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_32_5
#define NR_PLANES 5
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_32_6
#define NR_PLANES 6
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_32_7
#define NR_PLANES 7
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_32_8
#define NR_PLANES 8
#include "lp_rast_tri_tmp.h"

#define MULTISAMPLE 1
#define RASTER_64 1

#define TAG(x) x##_ms_1
#define NR_PLANES 1
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_ms_2
#define NR_PLANES 2
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_ms_3
#define NR_PLANES 3
/*#define TRI_4 lp_rast_triangle_3_4*/
/*#define TRI_16 lp_rast_triangle_3_16*/
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_ms_4
#define NR_PLANES 4
/*#define TRI_16 lp_rast_triangle_4_16*/
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_ms_5
#define NR_PLANES 5
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_ms_6
#define NR_PLANES 6
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_ms_7
#define NR_PLANES 7
#include "lp_rast_tri_tmp.h"

#define TAG(x) x##_ms_8
#define NR_PLANES 8
#include "lp_rast_tri_tmp.h"

#undef RASTER_64