/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/macros.h"
#include "main/enums.h"
#include "program/program.h"

#include "intel_batchbuffer.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_clip.h"


struct brw_reg get_tmp( struct brw_clip_compile *c )
{
   struct brw_reg tmp = brw_vec4_grf(c->last_tmp, 0);

   if (++c->last_tmp > c->prog_data.total_grf)
      c->prog_data.total_grf = c->last_tmp;

   return tmp;
}

static void release_tmp( struct brw_clip_compile *c, struct brw_reg tmp )
{
   if (tmp.nr == c->last_tmp-1)
      c->last_tmp--;
}


static struct brw_reg make_plane_ud(GLuint x, GLuint y, GLuint z, GLuint w)
{
   return brw_imm_ud((w<<24) | (z<<16) | (y<<8) | x);
}

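/* Set up the six fixed planes of the clip-space view volume (+/-x, +/-y and
 * +/-z against w).  Each plane is packed into a single dword as four signed
 * bytes (x, y, z, w), so 0xff encodes a coefficient of -1.
 */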
void brw_clip_init_planes( struct brw_clip_compile *c )
{
   struct brw_codegen *p = &c->func;

   if (!c->key.nr_userclip) {
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 0), make_plane_ud( 0,    0, 0xff, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 1), make_plane_ud( 0,    0,    1, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 2), make_plane_ud( 0, 0xff,    0, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 3), make_plane_ud( 0,    1,    0, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 4), make_plane_ud(0xff,  0,    0, 1));
      brw_MOV(p, get_element_ud(c->reg.fixed_planes, 5), make_plane_ud( 1,    0,    0, 1));
   }
}



#define W 3

/* Project 'pos' to screen space (or back again), overwrite with results:
 */
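/* On exit pos.w holds the reciprocal of the incoming w (rhw) and pos.xyz has
 * been scaled by it; because w is simply inverted, applying this a second
 * time converts the position back again.
 */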
void brw_clip_project_position(struct brw_clip_compile *c, struct brw_reg pos )
{
   struct brw_codegen *p = &c->func;

   /* calc rhw
    */
   brw_math_invert(p, get_element(pos, W), get_element(pos, W));

   /* value.xyz *= value.rhw
    */
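   /* Swizzles and writemasks are only available in Align16 access mode, so
    * switch modes around the multiply below.
    */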
   brw_set_default_access_mode(p, BRW_ALIGN_16);
   brw_MUL(p, brw_writemask(pos, WRITEMASK_XYZ), pos,
           brw_swizzle(pos, BRW_SWIZZLE_WWWW));
   brw_set_default_access_mode(p, BRW_ALIGN_1);
}


static void brw_clip_project_vertex( struct brw_clip_compile *c,
                                     struct brw_indirect vert_addr )
{
   struct brw_codegen *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
   GLuint ndc_offset = brw_varying_to_offset(&c->vue_map,
                                             BRW_VARYING_SLOT_NDC);

   /* Fixup position.  Extract from the original vertex and re-project
    * to screen space:
    */
   brw_MOV(p, tmp, deref_4f(vert_addr, hpos_offset));
   brw_clip_project_position(c, tmp);
   brw_MOV(p, deref_4f(vert_addr, ndc_offset), tmp);

   release_tmp(c, tmp);
}




/* Interpolate between two vertices and put the result into a0.0.
 * Increment a0.0 accordingly.
 *
 * Beware that dest_ptr can be equal to v0_ptr!
 */
void brw_clip_interp_vertex( struct brw_clip_compile *c,
                             struct brw_indirect dest_ptr,
                             struct brw_indirect v0_ptr, /* from */
                             struct brw_indirect v1_ptr, /* to */
                             struct brw_reg t0,
                             bool force_edgeflag)
{
   struct brw_codegen *p = &c->func;
   struct brw_reg t_nopersp, v0_ndc_copy;
   GLuint slot;

   /* Just copy the vertex header:
    */
   /*
    * After the CLIP stage, only the first 256 bits of the VUE are read
    * back on Ironlake, so there is no need to change it.
    */
   brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);


   /* First handle the 3D and NDC interpolation, in case we
    * need noperspective interpolation.  Doing it early has no
    * performance impact in any case.
    */

   /* Take a copy of the v0 NDC coordinates, in case dest == v0. */
   if (c->key.contains_noperspective_varying) {
      GLuint offset = brw_varying_to_offset(&c->vue_map,
                                            BRW_VARYING_SLOT_NDC);
      v0_ndc_copy = get_tmp(c);
      brw_MOV(p, v0_ndc_copy, deref_4f(v0_ptr, offset));
   }

   /* Compute the new 3D position
    *
    * dest_hpos = v0_hpos * (1 - t0) + v1_hpos * t0
    */
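   /* The MUL's real destination is the accumulator (the GRF destination is
    * null); the MAC then adds -v0_hpos * t0 on top, so tmp ends up holding
    * (v1_hpos - v0_hpos) * t0 and the final ADD yields the interpolated
    * position.
    */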
   {
      GLuint delta = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
      struct brw_reg tmp = get_tmp(c);
      brw_MUL(p, vec4(brw_null_reg()), deref_4f(v1_ptr, delta), t0);
      brw_MAC(p, tmp, negate(deref_4f(v0_ptr, delta)), t0);
      brw_ADD(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta), tmp);
      release_tmp(c, tmp);
   }

   /* Recreate the projected (NDC) coordinate in the new vertex header */
   brw_clip_project_vertex(c, dest_ptr);

   /* If we have noperspective attributes,
    * we need to compute the screen-space t
    */
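   /* t0 was computed from clip-space distances, which is what
    * perspective-correct attributes want.  Noperspective attributes vary
    * linearly in screen space instead, so derive a separate factor,
    * t_nopersp, from the projected (NDC) positions.
    */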
   if (c->key.contains_noperspective_varying) {
      GLuint delta = brw_varying_to_offset(&c->vue_map,
                                           BRW_VARYING_SLOT_NDC);
      struct brw_reg tmp = get_tmp(c);
      t_nopersp = get_tmp(c);

      /* t_nopersp = vec4(v1.xy, dest.xy) */
      brw_MOV(p, t_nopersp, deref_4f(v1_ptr, delta));
      brw_MOV(p, tmp, deref_4f(dest_ptr, delta));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p,
              brw_writemask(t_nopersp, WRITEMASK_ZW),
              brw_swizzle(tmp, BRW_SWIZZLE_XYXY));

      /* t_nopersp = vec4(v1.xy, dest.xy) - v0.xyxy */
      brw_ADD(p, t_nopersp, t_nopersp,
              negate(brw_swizzle(v0_ndc_copy, BRW_SWIZZLE_XYXY)));

      /* Add the absolute values of the X and Y deltas so that if
       * the points aren't in the same place on the screen we get
       * nonzero values to divide.
       *
       * After that, we have vert1 - vert0 in t_nopersp.x and
       * vertnew - vert0 in t_nopersp.y
       *
       * t_nopersp = vec2(|v1.x  -v0.x| + |v1.y  -v0.y|,
       *                  |dest.x-v0.x| + |dest.y-v0.y|)
       */
      brw_ADD(p,
              brw_writemask(t_nopersp, WRITEMASK_XY),
              brw_abs(brw_swizzle(t_nopersp, BRW_SWIZZLE_XZXZ)),
              brw_abs(brw_swizzle(t_nopersp, BRW_SWIZZLE_YWYW)));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* If the points are in the same place, just substitute a
       * value to avoid divide-by-zero
       */
      brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ,
              vec1(t_nopersp),
              brw_imm_f(0));
      brw_IF(p, BRW_EXECUTE_1);
      brw_MOV(p, t_nopersp, brw_imm_vf4(brw_float_to_vf(1.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0),
                                        brw_float_to_vf(0.0)));
      brw_ENDIF(p);

      /* Now compute t_nopersp = t_nopersp.y/t_nopersp.x and broadcast it. */
      brw_math_invert(p, get_element(t_nopersp, 0), get_element(t_nopersp, 0));
      brw_MUL(p, vec1(t_nopersp), vec1(t_nopersp),
              vec1(suboffset(t_nopersp, 1)));
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_MOV(p, t_nopersp, brw_swizzle(t_nopersp, BRW_SWIZZLE_XXXX));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      release_tmp(c, tmp);
      release_tmp(c, v0_ndc_copy);
   }

   /* Now we can iterate over each attribute
    * (could be done in pairs?)
    */
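   /* Each VUE slot is one vec4, so brw_vue_slot_to_offset() simply converts
    * a slot index into a 16-byte offset within the vertex.
    */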
   for (slot = 0; slot < c->vue_map.num_slots; slot++) {
      int varying = c->vue_map.slot_to_varying[slot];
      GLuint delta = brw_vue_slot_to_offset(slot);

      /* HPOS, NDC already handled above */
      if (varying == VARYING_SLOT_POS || varying == BRW_VARYING_SLOT_NDC)
         continue;


      if (varying == VARYING_SLOT_EDGE) {
         if (force_edgeflag)
            brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
         else
            brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
      } else if (varying == VARYING_SLOT_PSIZ) {
         /* PSIZ doesn't need interpolation because it isn't used by the
          * fragment shader.
          */
      } else if (varying < VARYING_SLOT_MAX) {
         /* This is a true vertex result (and not a special value for the VUE
          * header), so interpolate:
          *
          *        New = attr0 + t*attr1 - t*attr0
          *
          * Unless the attribute is flat shaded -- in which case just copy
          * from one of the sources (doesn't matter which; already copied from pv)
          */
         GLuint interp = c->key.interp_mode[slot];

         if (interp != INTERP_MODE_FLAT) {
            struct brw_reg tmp = get_tmp(c);
            struct brw_reg t =
               interp == INTERP_MODE_NOPERSPECTIVE ? t_nopersp : t0;

            brw_MUL(p,
                    vec4(brw_null_reg()),
                    deref_4f(v1_ptr, delta),
                    t);

            brw_MAC(p,
                    tmp,
                    negate(deref_4f(v0_ptr, delta)),
                    t);

            brw_ADD(p,
                    deref_4f(dest_ptr, delta),
                    deref_4f(v0_ptr, delta),
                    tmp);

            release_tmp(c, tmp);
         }
         else {
            brw_MOV(p,
                    deref_4f(dest_ptr, delta),
                    deref_4f(v0_ptr, delta));
         }
      }
   }

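   /* Vertices are copied out two slots (one GRF) at a time, so if the slot
    * count is odd, zero the trailing pad slot so the copy does not push
    * uninitialized data into the URB.
    */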
   if (c->vue_map.num_slots % 2) {
      GLuint delta = brw_vue_slot_to_offset(c->vue_map.num_slots);

      brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
   }

   if (c->key.contains_noperspective_varying)
      release_tmp(c, t_nopersp);
}

void brw_clip_emit_vue(struct brw_clip_compile *c,
                       struct brw_indirect vert,
                       enum brw_urb_write_flags flags,
                       GLuint header)
{
   struct brw_codegen *p = &c->func;
   bool allocate = flags & BRW_URB_WRITE_ALLOCATE;

   brw_clip_ff_sync(c);

   /* Any URB entry that is allocated must subsequently be used or discarded,
    * so it doesn't make sense to mark EOT and ALLOCATE at the same time.
    */
   assert(!(allocate && (flags & BRW_URB_WRITE_EOT)));

   /* Copy the vertex from vertn into m1..mN+1:
    */
   brw_copy_from_indirect(p, brw_message_reg(1), vert, c->nr_regs);

   /* Overwrite PrimType and PrimStart in the message header, for
    * each vertex in turn:
    */
   brw_MOV(p, get_element_ud(c->reg.R0, 2), brw_imm_ud(header));


   /* Send each vertex as a separate write to the urb.  This
    * is different to the concept in brw_sf_emit.c, where
    * subsequent writes are used to build up a single urb
    * entry.  Each of these writes instantiates a separate
    * urb entry - (I think... what about 'allocate'?)
    */
   brw_urb_WRITE(p,
                 allocate ? c->reg.R0 : retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 0,
                 c->reg.R0,
                 flags,
                 c->nr_regs + 1, /* msg length */
                 allocate ? 1 : 0, /* response_length */
                 0, /* urb offset */
                 BRW_URB_SWIZZLE_NONE);
}



void brw_clip_kill_thread(struct brw_clip_compile *c)
{
   struct brw_codegen *p = &c->func;

   brw_clip_ff_sync(c);
   /* Send an empty message to kill the thread and release any
    * allocated urb entry:
    */
   brw_urb_WRITE(p,
                 retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
                 0,
                 c->reg.R0,
                 BRW_URB_WRITE_UNUSED | BRW_URB_WRITE_EOT_COMPLETE,
                 1, /* msg len */
                 0, /* response len */
                 0,
                 BRW_URB_SWIZZLE_NONE);
}




struct brw_reg brw_clip_plane0_address( struct brw_clip_compile *c )
{
   return brw_address(c->reg.fixed_planes);
}

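/* Byte stride between successive clip planes.  With user clip planes enabled
 * every plane is stored as a full vec4 of floats; otherwise the clipper walks
 * the packed signed-byte planes built by brw_clip_init_planes(), one dword
 * each.
 */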
struct brw_reg brw_clip_plane_stride( struct brw_clip_compile *c )
{
   if (c->key.nr_userclip) {
      return brw_imm_uw(16);
   }
   else {
      return brw_imm_uw(4);
   }
}


/* Distribute flatshaded attributes from provoking vertex prior to
 * clipping.
 */
void brw_clip_copy_flatshaded_attributes( struct brw_clip_compile *c,
                                          GLuint to, GLuint from )
{
   struct brw_codegen *p = &c->func;

   for (int i = 0; i < c->vue_map.num_slots; i++) {
      if (c->key.interp_mode[i] == INTERP_MODE_FLAT) {
         brw_MOV(p,
                 byte_offset(c->reg.vertex[to], brw_vue_slot_to_offset(i)),
                 byte_offset(c->reg.vertex[from], brw_vue_slot_to_offset(i)));
      }
   }
}



void brw_clip_init_clipmask( struct brw_clip_compile *c )
{
   struct brw_codegen *p = &c->func;
   struct brw_reg incoming = get_element_ud(c->reg.R0, 2);

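   /* R0.2 of the clip thread payload carries the outcode computed upstream:
    * the six fixed-plane bits sit in bits 31:26, and the user-clip-plane bits
    * start at bit 14 (eight planes on G4x/Ironlake, six otherwise).
    */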
   /* Shift so that lowest outcode bit is rightmost:
    */
   brw_SHR(p, c->reg.planemask, incoming, brw_imm_ud(26));

   if (c->key.nr_userclip) {
      struct brw_reg tmp = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UD);

      /* Rearrange userclip outcodes so that they come directly after
       * the fixed plane bits.
       */
      if (p->devinfo->gen == 5 || p->devinfo->is_g4x)
         brw_AND(p, tmp, incoming, brw_imm_ud(0xff<<14));
      else
         brw_AND(p, tmp, incoming, brw_imm_ud(0x3f<<14));

      brw_SHR(p, tmp, tmp, brw_imm_ud(8));
      brw_OR(p, c->reg.planemask, c->reg.planemask, tmp);

      release_tmp(c, tmp);
   }
}

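/* Ironlake (gen5) requires the clip thread to issue a single ff_sync message
 * before its first URB write; bit 0 of c->reg.ff_sync tracks whether it has
 * been sent, so subsequent calls become no-ops.
 */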
void brw_clip_ff_sync(struct brw_clip_compile *c)
{
   struct brw_codegen *p = &c->func;

   if (p->devinfo->gen == 5) {
      brw_AND(p, brw_null_reg(), c->reg.ff_sync, brw_imm_ud(0x1));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_Z);
      brw_IF(p, BRW_EXECUTE_1);
      {
         brw_OR(p, c->reg.ff_sync, c->reg.ff_sync, brw_imm_ud(0x1));
         brw_ff_sync(p,
                     c->reg.R0,
                     0,
                     c->reg.R0,
                     1, /* allocate */
                     1, /* response length */
                     0 /* eot */);
      }
      brw_ENDIF(p);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   }
}

void brw_clip_init_ff_sync(struct brw_clip_compile *c)
{
   struct brw_codegen *p = &c->func;

   if (p->devinfo->gen == 5) {
      brw_MOV(p, c->reg.ff_sync, brw_imm_ud(0));
   }
}