/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "main/macros.h"
#include "main/enums.h"
#include "program/program.h"

#include "brw_clip.h"

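/* Trivial bump allocator for temporary vec4 GRFs: get_tmp() hands out the
 * next unused register and grows total_grf as needed; release_tmp() only
 * reclaims a register if it was the most recently allocated one.
 */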
struct brw_reg get_tmp( struct brw_clip_compile *c )
{
   struct brw_reg tmp = brw_vec4_grf(c->last_tmp, 0);

   if (++c->last_tmp > c->prog_data.total_grf)
      c->prog_data.total_grf = c->last_tmp;

   return tmp;
}

static void release_tmp( struct brw_clip_compile *c, struct brw_reg tmp )
{
   if (tmp.nr == c->last_tmp-1)
      c->last_tmp--;
}

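/* Pack a plane equation into a single dword, one signed byte per
 * component (so 0xff encodes -1).  The clip kernel presumably reads these
 * back as signed bytes when it walks the fixed planes.
 */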
static struct brw_reg make_plane_ud(GLuint x, GLuint y, GLuint z, GLuint w)
{
   return brw_imm_ud((w<<24) | (z<<16) | (y<<8) | x);
}

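/* Emit code to set up the six fixed frustum planes (+/-x, +/-y, +/-z
 * against w) in byte-packed form.  When user clip planes are enabled this
 * is skipped; in that case the planes are assumed to arrive as full float
 * vectors pushed from elsewhere (hence the larger stride reported by
 * brw_clip_plane_stride() below).
 */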
void brw_clip_init_planes( struct brw_clip_compile *c )
{
   struct brw_codegen *p = &c->func;

   if (!c->key.nr_userclip) {
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 0), make_plane_ud( 0,    0, 0xff, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 1), make_plane_ud( 0,    0,    1, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 2), make_plane_ud( 0, 0xff,    0, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 3), make_plane_ud( 0,    1,    0, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 4), make_plane_ud(0xff,  0,    0, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 5), make_plane_ud( 1,    0,    0, 1));
   }
}

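/* Component index of W within a position vec4. */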
#define W 3

/* Project 'pos' to screen space (or back again), overwriting it with the
 * result:
 */
void brw_clip_project_position(struct brw_clip_compile *c, struct brw_reg pos )
{
   struct brw_codegen *p = &c->func;

   /* calc rhw
    */
   brw_math_invert(p, get_element(pos, W), get_element(pos, W));

   /* value.xyz *= value.rhw
    */
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   brw_MUL(p, brw_writemask(pos, WRITEMASK_XYZ), pos,
           brw_swizzle(pos, BRW_SWIZZLE_WWWW));
   brw_set_default_access_mode(p, BRW_ALIGN_1);
}

static void brw_clip_project_vertex( struct brw_clip_compile *c,
                                     struct brw_indirect vert_addr )
{
   struct brw_codegen *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
   GLuint ndc_offset = brw_varying_to_offset(&c->vue_map,
                                             BRW_VARYING_SLOT_NDC);

   /* Fixup position.  Extract from the original vertex and re-project
    * to screen space:
    */
   brw_MOV(p, tmp, deref_4f(vert_addr, hpos_offset));
   brw_clip_project_position(c, tmp);
   brw_MOV(p, deref_4f(vert_addr, ndc_offset), tmp);

   release_tmp(c, tmp);
}

/* Interpolate between the two vertices at v0_ptr and v1_ptr and write the
 * resulting vertex through dest_ptr.
 *
 * Beware that dest_ptr can be equal to v0_ptr!
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
                             struct brw_indirect dest_ptr,
                             struct brw_indirect v0_ptr, /* from */
                             struct brw_indirect v1_ptr, /* to */
                             struct brw_reg t0,
                             bool force_edgeflag)
{
   struct brw_codegen *p = &c->func;
   struct brw_reg t_nopersp, v0_ndc_copy;
   GLuint slot;

   /* Just copy the vertex header:
    */
   /*
    * After the CLIP stage, only the first 256 bits of the VUE are read
    * back on Ironlake, so there is no need to change it here.
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);

   /* First handle the 3D and NDC interpolation, in case we
    * need noperspective interpolation. Doing it early has no
    * performance impact in any case.
    */

   /* Take a copy of the v0 NDC coordinates, in case dest == v0. */
   if (c->key.contains_noperspective_varying) {
      GLuint offset = brw_varying_to_offset(&c->vue_map,
                                            BRW_VARYING_SLOT_NDC);
      v0_ndc_copy = get_tmp(c);
      brw_MOV(p, v0_ndc_copy, deref_4f(v0_ptr, offset));
   }

   /* Compute the new 3D position
    *
    *     dest_hpos = v0_hpos * (1 - t0) + v1_hpos * t0
    */
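   /* Note: the MUL below writes to a null register, but its product still
    * lands in the implicit accumulator; the MAC then adds -v0_hpos * t0 to
    * it, leaving t0 * (v1_hpos - v0_hpos) in tmp, and the final ADD
    * produces the interpolation described above.
    */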
   {
      GLuint delta = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
      struct brw_reg tmp = get_tmp(c);
      brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
      brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
      brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);
      release_tmp(c, tmp);
   }

   /* Recreate the projected (NDC) coordinate in the new vertex header */
   brw_clip_project_vertex(c, dest_ptr);

   /* If we have noperspective attributes,
    * we need to compute the screen-space t
    */
   if (c->key.contains_noperspective_varying) {
      GLuint delta = brw_varying_to_offset(&c->vue_map,
                                           BRW_VARYING_SLOT_NDC);
      struct brw_reg tmp = get_tmp(c);
      t_nopersp = get_tmp(c);

      /* t_nopersp = vec4(v1.xy, dest.xy) */
      brw_MOV(p, t_nopersp, deref_4f(v1_ptr, delta));
      brw_MOV(p, tmp, deref_4f(dest_ptr, delta));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p,
              brw_writemask(t_nopersp, WRITEMASK_ZW),
              brw_swizzle(tmp, BRW_SWIZZLE_XYXY));

      /* t_nopersp = vec4(v1.xy, dest.xy) - v0.xyxy */
      brw_ADD(p, t_nopersp, t_nopersp,
              negate(brw_swizzle(v0_ndc_copy, BRW_SWIZZLE_XYXY)));

      /* Add the absolute values of the X and Y deltas so that if
       * the points aren't in the same place on the screen we get
       * nonzero values to divide.
       *
       * After that, we have vert1 - vert0 in t_nopersp.x and
       * vertnew - vert0 in t_nopersp.y
       *
       * t_nopersp = vec2(|v1.x  -v0.x| + |v1.y  -v0.y|,
       *                  |dest.x-v0.x| + |dest.y-v0.y|)
       */
      brw_ADD(p,
              brw_writemask(t_nopersp, WRITEMASK_XY),
              brw_abs(brw_swizzle(t_nopersp, BRW_SWIZZLE_XZXZ)),
              brw_abs(brw_swizzle(t_nopersp, BRW_SWIZZLE_YWYW)));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* If the points are in the same place, just substitute a
       * value to avoid divide-by-zero
       */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
              vec1(t_nopersp),
              brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      brw_MOV(p, t_nopersp, brw_imm_vf4(brw_float_to_vf(1.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0)));
      brw_ENDIF(p);

      /* Now compute t_nopersp = t_nopersp.y/t_nopersp.x and broadcast it. */
      brw_math_invert(p, get_element(t_nopersp, 0), get_element(t_nopersp, 0));
      brw_MUL(p, vec1(t_nopersp), vec1(t_nopersp),
              vec1(suboffset(t_nopersp, 1)));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p, t_nopersp, brw_swizzle(t_nopersp, BRW_SWIZZLE_XXXX));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      release_tmp(c, tmp);
      release_tmp(c, v0_ndc_copy);
   }

   /* Now we can iterate over each attribute
    * (could be done in pairs?)
    */
   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      int varying = c->vue_map.slot_to_varying[slot];
      GLuint delta = brw_vue_slot_to_offset(slot);

      /* HPOS, NDC already handled above */
      if (varying == VARYING_SLOT_POS || varying == BRW_VARYING_SLOT_NDC)
         continue;

      if (varying == VARYING_SLOT_EDGE) {
         if (force_edgeflag)
            brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
         else
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      } else if (varying == VARYING_SLOT_PSIZ) {
         /* PSIZ doesn't need interpolation because it isn't used by the
          * fragment shader.
          */
      } else if (varying < VARYING_SLOT_MAX) {
         /* This is a true vertex result (and not a special value for the VUE
          * header), so interpolate:
          *
          *        New = attr0 + t*attr1 - t*attr0
          *
          * Unless the attribute is flat shaded -- in which case just copy
          * from one of the sources (doesn't matter which; already copied from pv)
          */
         GLuint interp = c->key.interp_mode[slot];

         if (interp != INTERP_MODE_FLAT) {
            struct brw_reg tmp = get_tmp(c);
            struct brw_reg t =
               interp == INTERP_MODE_NOPERSPECTIVE ? t_nopersp : t0;

            brw_MUL(p,
                    vec4(brw_null_reg()),
                    deref_4f(v1_ptr, delta),
                    t);

            brw_MAC(p,
                    tmp,
                    negate(deref_4f(v0_ptr, delta)),
                    t);

            brw_ADD(p,
                    deref_4f(dest_ptr, delta),
                    deref_4f(v0_ptr, delta),
                    tmp);

            release_tmp(c, tmp);
         }
         else {
            brw_MOV(p,
                    deref_4f(dest_ptr, delta),
                    deref_4f(v0_ptr, delta));
         }
      }
   }

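   /* If the VUE has an odd number of slots, there is half a GRF of padding
    * at the end.  Zero it, presumably so the whole-register copy to the
    * URB in brw_clip_emit_vue() doesn't push undefined data.
    */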
   if (c->vue_map.num_slots % 2) {
      GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   if (c->key.contains_noperspective_varying)
      release_tmp(c, t_nopersp);
}

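/* Emit one complete vertex to the URB: copy the c->nr_regs GRFs of VUE data
 * into the message payload and issue a urb_WRITE, with this vertex's
 * PrimType/PrimStart bits patched into the R0 header.
 */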
void brw_clip_emit_vue(struct brw_clip_compile *c,
                       struct brw_indirect vert,
                       enum brw_urb_write_flags flags,
                       GLuint header)
{
   struct brw_codegen *p = &c->func;
   bool allocate = flags & BRW_URB_WRITE_ALLOCATE;

   brw_clip_ff_sync(c);

   /* Any URB entry that is allocated must subsequently be used or discarded,
    * so it doesn't make sense to mark EOT and ALLOCATE at the same time.
    */
   assert(!(allocate && (flags & BRW_URB_WRITE_EOT)));

   /* Copy the vertex from vertn into m1..mN+1:
    */
   brw_copy_from_indirect(p, brw_message_reg(1), vert, c->nr_regs);

   /* Overwrite PrimType and PrimStart in the message header, for
    * each vertex in turn:
    */
   brw_MOV(p, get_element_ud(c->reg.R0, 2), brw_imm_ud(header));

   /* Send each vertex as a separate write to the urb.  This
    * differs from the scheme in brw_sf_emit.c, where
    * subsequent writes are used to build up a single urb
    * entry.  Each of these writes instantiates a separate
    * urb entry - (I think... what about 'allocate'?)
    */
   brw_urb_WRITE(p,
                 allocate ? c->reg.R0 : retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 0,
                 c->reg.R0,
                 flags,
                 c->nr_regs + 1, /* msg length */
                 allocate ? 1 : 0, /* response_length */
                 0, /* urb offset */
                 BRW_URB_SWIZZLE_NONE);
}

void brw_clip_kill_thread(struct brw_clip_compile *c)
{
   struct brw_codegen *p = &c->func;

   brw_clip_ff_sync(c);
   /* Send an empty message to kill the thread and release any
    * allocated urb entry:
    */
   brw_urb_WRITE(p,
                 retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 0,
                 c->reg.R0,
                 BRW_URB_WRITE_UNUSED | BRW_URB_WRITE_EOT_COMPLETE,
                 1, /* msg len */
                 0, /* response len */
                 0,
                 BRW_URB_SWIZZLE_NONE);
}

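/* Address of the first clip plane, for use with indirect addressing when
 * iterating over the planes.
 */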
struct brw_reg brw_clip_plane0_address( struct brw_clip_compile *c )
{
   return brw_address(c->reg.fixed_planes);
}

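/* Byte stride between successive clip planes: 16 bytes when the planes are
 * full float vectors (user clip planes present), 4 bytes for the packed
 * byte planes built by brw_clip_init_planes().
 */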
struct brw_reg brw_clip_plane_stride( struct brw_clip_compile *c )
{
   if (c->key.nr_userclip) {
      return brw_imm_uw(16);
   }
   else {
      return brw_imm_uw(4);
   }
}

/* Distribute flatshaded attributes from provoking vertex prior to
 * clipping.
 */
void brw_clip_copy_flatshaded_attributes( struct brw_clip_compile *c,
                                          GLuint to, GLuint from )
{
   struct brw_codegen *p = &c->func;

   for (int i = 0; i < c->vue_map.num_slots; i++) {
      if (c->key.interp_mode[i] == INTERP_MODE_FLAT) {
         brw_MOV(p,
                 byte_offset(c->reg.vertex[to], brw_vue_slot_to_offset(i)),
                 byte_offset(c->reg.vertex[from], brw_vue_slot_to_offset(i)));
      }
   }
}

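/* Initialize the plane mask from the clip outcode delivered in DWord 2 of
 * R0 (presumably computed by the fixed-function hardware upstream): the six
 * fixed-plane bits live in the top bits of that dword, and any user-clip
 * bits (starting at bit 14) are spliced in directly above them.
 */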
void brw_clip_init_clipmask( struct brw_clip_compile *c )
{
   struct brw_codegen *p = &c->func;
   struct brw_reg incoming = get_element_ud(c->reg.R0, 2);

   /* Shift so that lowest outcode bit is rightmost:
    */
   brw_SHR(p, c->reg.planemask, incoming, brw_imm_ud(26));

   if (c->key.nr_userclip) {
      struct brw_reg tmp = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UD);

      /* Rearrange userclip outcodes so that they come directly after
       * the fixed plane bits.
       */
      if (p->devinfo->ver == 5 || p->devinfo->is_g4x)
         brw_AND(p, tmp, incoming, brw_imm_ud(0xff<<14));
      else
         brw_AND(p, tmp, incoming, brw_imm_ud(0x3f<<14));

      brw_SHR(p, tmp, tmp, brw_imm_ud(8));
      brw_OR(p, c->reg.planemask, c->reg.planemask, tmp);

      release_tmp(c, tmp);
   }
}

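/* On Ironlake a clip thread must send an ff_sync message before its first
 * URB write.  Bit 0 of c->reg.ff_sync tracks whether that has already been
 * done, so the sync (with its URB handle allocation) is only issued once
 * per thread.
 */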
void brw_clip_ff_sync(struct brw_clip_compile *c)
{
   struct brw_codegen *p = &c->func;

   if (p->devinfo->ver == 5) {
      brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_Z);
      brw_IF(p, BRW_EXECUTE_1);
      {
         brw_OR(p, c->reg.ff_sync, c->reg.ff_sync, brw_imm_ud(0x1));
         brw_ff_sync(p,
                     c->reg.R0,
                     0,
                     c->reg.R0,
                     1, /* allocate */
                     1, /* response length */
                     0 /* eot */);
      }
      brw_ENDIF(p);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   }
}

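/* Clear the ff_sync-sent flag at thread startup (Ironlake only). */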
void brw_clip_init_ff_sync(struct brw_clip_compile *c)
{
   struct brw_codegen *p = &c->func;

   if (p->devinfo->ver == 5) {
      brw_MOV(p, c->reg.ff_sync, brw_imm_ud(0));
   }
}