/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * Tiling engine.
 *
 * Builds per-tile display lists and executes them on calls to
 * lp_setup_flush().
 */
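
/*
 * Overview (a rough sketch derived from the code below, not an
 * authoritative spec): the setup context moves between three states,
 * SETUP_FLUSHED, SETUP_CLEARED and SETUP_ACTIVE, via set_scene_state().
 * Clears issued before any primitive are accumulated in the CLEARED
 * state and turned into binned clear commands once binning starts
 * (begin_binning()).  lp_setup_flush() hands the current scene to the
 * rasterizer and returns to the FLUSHED state.
 */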

#include <limits.h>

#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_viewport.h"
#include "draw/draw_pipe.h"
#include "util/os_time.h"
#include "lp_context.h"
#include "lp_memory.h"
#include "lp_scene.h"
#include "lp_texture.h"
#include "lp_debug.h"
#include "lp_fence.h"
#include "lp_query.h"
#include "lp_rast.h"
#include "lp_setup_context.h"
#include "lp_screen.h"
#include "lp_state.h"
#include "state_tracker/sw_winsys.h"

#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"


static boolean set_scene_state( struct lp_setup_context *, enum setup_state,
                                const char *reason);
static boolean try_update_scene_state( struct lp_setup_context *setup );

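/**
 * Advance to the next scene in the ring of scenes.  If that scene is still
 * being rasterized, wait on its fence, then start binning into it.
 */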
static void
lp_setup_get_empty_scene(struct lp_setup_context *setup)
{
   assert(setup->scene == NULL);

   setup->scene_idx++;
   setup->scene_idx %= ARRAY_SIZE(setup->scenes);

   setup->scene = setup->scenes[setup->scene_idx];

   if (setup->scene->fence) {
      if (LP_DEBUG & DEBUG_SETUP)
         debug_printf("%s: wait for scene %d\n",
                      __FUNCTION__, setup->scene->fence->id);

      lp_fence_wait(setup->scene->fence);
   }

   lp_scene_begin_binning(setup->scene, &setup->fb, setup->rasterizer_discard);

}


static void
first_triangle( struct lp_setup_context *setup,
                const float (*v0)[4],
                const float (*v1)[4],
                const float (*v2)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_triangle( setup );
   setup->triangle( setup, v0, v1, v2 );
}

static void
first_line( struct lp_setup_context *setup,
            const float (*v0)[4],
            const float (*v1)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_line( setup );
   setup->line( setup, v0, v1 );
}

static void
first_point( struct lp_setup_context *setup,
             const float (*v0)[4])
{
   assert(setup->state == SETUP_ACTIVE);
   lp_setup_choose_point( setup );
   setup->point( setup, v0 );
}

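/**
 * Reset derived state and the per-primitive binning hooks so the next
 * scene starts from a clean slate (no stored fs state, no current scene).
 */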
void lp_setup_reset( struct lp_setup_context *setup )
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Reset derived state */
   for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
      setup->constants[i].stored_size = 0;
      setup->constants[i].stored_data = NULL;
   }
   setup->fs.stored = NULL;
   setup->dirty = ~0;

   /* no current bin */
   setup->scene = NULL;

   /* Reset some state:
    */
   memset(&setup->clear, 0, sizeof setup->clear);

   /* Have an explicit "start-binning" call and get rid of this
    * pointer twiddling?
    */
   setup->line = first_line;
   setup->point = first_point;
   setup->triangle = first_triangle;
}


/** Rasterize all scene's bins */
static void
lp_setup_rasterize_scene( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);

   scene->num_active_queries = setup->active_binned_queries;
   memcpy(scene->active_queries, setup->active_queries,
          scene->num_active_queries * sizeof(scene->active_queries[0]));

   lp_scene_end_binning(scene);

   lp_fence_reference(&setup->last_fence, scene->fence);

   if (setup->last_fence)
      setup->last_fence->issued = TRUE;

   mtx_lock(&screen->rast_mutex);

   /* FIXME: We enqueue the scene and then wait on the rasterizer to finish.
    * This means we never actually run any vertex work in parallel with
    * rasterization (not in the same context at least), which is what having
    * multiple scenes per setup is about - when we get a new empty scene,
    * any old one is already empty again because we waited here for the
    * raster tasks to finish.  Ideally, we shouldn't need to wait here and
    * should instead rely on fences elsewhere when waiting is necessary.
    * Certainly, lp_scene_end_rasterization() would need to be deferred too,
    * and there are probably other reasons why this doesn't actually work.
    */
   lp_rast_queue_scene(screen->rast, scene);
   lp_rast_finish(screen->rast);
   mtx_unlock(&screen->rast_mutex);

   lp_scene_end_rasterization(setup->scene);
   lp_setup_reset( setup );

   LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
}


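/**
 * Start binning into the current scene: create its fence, store the
 * current state in the scene, and bin any clears that were accumulated
 * while in the CLEARED state.
 */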
static boolean
begin_binning( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   boolean need_zsload = FALSE;
   boolean ok;

   assert(scene);
   assert(scene->fence == NULL);

   /* Always create a fence:
    */
   scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
   if (!scene->fence)
      return FALSE;

   ok = try_update_scene_state(setup);
   if (!ok)
      return FALSE;

   if (setup->fb.zsbuf &&
       ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
      need_zsload = TRUE;

   LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
          setup->clear.flags >> 2,
          need_zsload ? "clear": "load");

   if (setup->clear.flags & PIPE_CLEAR_COLOR) {
      unsigned cbuf;
      for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
         assert(PIPE_CLEAR_COLOR0 == 1 << 2);
         if (setup->clear.flags & (1 << (2 + cbuf))) {
            union lp_rast_cmd_arg clearrb_arg;
            struct lp_rast_clear_rb *cc_scene =
               (struct lp_rast_clear_rb *)
                  lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));

            if (!cc_scene) {
               return FALSE;
            }

            cc_scene->cbuf = cbuf;
            cc_scene->color_val = setup->clear.color_val[cbuf];
            clearrb_arg.clear_rb = cc_scene;

            if (!lp_scene_bin_everywhere(scene,
                                         LP_RAST_OP_CLEAR_COLOR,
                                         clearrb_arg))
               return FALSE;
         }
      }
   }

   if (setup->fb.zsbuf) {
      if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
         ok = lp_scene_bin_everywhere( scene,
                                       LP_RAST_OP_CLEAR_ZSTENCIL,
                                       lp_rast_arg_clearzs(
                                          setup->clear.zsvalue,
                                          setup->clear.zsmask));
         if (!ok)
            return FALSE;
      }
   }

   setup->clear.flags = 0;
   setup->clear.zsmask = 0;
   setup->clear.zsvalue = 0;

   scene->had_queries = !!setup->active_binned_queries;

   LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
   return TRUE;
}


/* This basically bins and then flushes any outstanding full-screen
 * clears.
 *
 * TODO: fast path for fullscreen clears and no triangles.
 */
static boolean
execute_clears( struct lp_setup_context *setup )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   return begin_binning( setup );
}

const char *states[] = {
   "FLUSHED",
   "CLEARED",
   "ACTIVE "
};


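/**
 * Move the setup context to a new state (FLUSHED, CLEARED or ACTIVE).
 * A sketch of the transitions handled below (derived from this switch,
 * not an authoritative spec):
 *
 *    FLUSHED -> CLEARED           : grab an empty scene, accumulate clears
 *    FLUSHED/CLEARED -> ACTIVE    : begin_binning() (bins pending clears)
 *    ACTIVE/CLEARED -> FLUSHED    : execute any pending clears, rasterize
 *                                   the scene and reset
 *
 * On failure the scene is dropped and the state falls back to FLUSHED.
 */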
static boolean
set_scene_state( struct lp_setup_context *setup,
                 enum setup_state new_state,
                 const char *reason)
{
   unsigned old_state = setup->state;

   if (old_state == new_state)
      return TRUE;

   if (LP_DEBUG & DEBUG_SCENE) {
      debug_printf("%s old %s new %s%s%s\n",
                   __FUNCTION__,
                   states[old_state],
                   states[new_state],
                   (new_state == SETUP_FLUSHED) ? ": " : "",
                   (new_state == SETUP_FLUSHED) ? reason : "");

      if (new_state == SETUP_FLUSHED && setup->scene)
         lp_debug_draw_bins_by_cmd_length(setup->scene);
   }

   /* wait for a free/empty scene
    */
   if (old_state == SETUP_FLUSHED)
      lp_setup_get_empty_scene(setup);

   switch (new_state) {
   case SETUP_CLEARED:
      break;

   case SETUP_ACTIVE:
      if (!begin_binning( setup ))
         goto fail;
      break;

   case SETUP_FLUSHED:
      if (old_state == SETUP_CLEARED)
         if (!execute_clears( setup ))
            goto fail;

      lp_setup_rasterize_scene( setup );
      assert(setup->scene == NULL);
      break;

   default:
      assert(0 && "invalid setup state mode");
      goto fail;
   }

   setup->state = new_state;
   return TRUE;

fail:
   if (setup->scene) {
      lp_scene_end_rasterization(setup->scene);
      setup->scene = NULL;
   }

   setup->state = SETUP_FLUSHED;
   lp_setup_reset( setup );
   return FALSE;
}


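/**
 * Flush the current scene to the rasterizer.  If a fence pointer is
 * provided, it is set to reference the fence of the last scene handed off.
 */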
void
lp_setup_flush( struct lp_setup_context *setup,
                struct pipe_fence_handle **fence,
                const char *reason)
{
   set_scene_state( setup, SETUP_FLUSHED, reason );

   if (fence) {
      lp_fence_reference((struct lp_fence **)fence, setup->last_fence);
   }
}


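/**
 * Bind a new framebuffer.  Any scene built against the old framebuffer is
 * flushed first; the new state is picked up when the next scene is needed.
 */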
void
lp_setup_bind_framebuffer( struct lp_setup_context *setup,
                           const struct pipe_framebuffer_state *fb )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Flush any old scene.
    */
   set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );

   /*
    * Ensure the old scene is not reused.
    */
   assert(!setup->scene);

   /* Set new state.  This will be picked up later when we next need a
    * scene.
    */
   util_copy_framebuffer_state(&setup->fb, fb);
   setup->framebuffer.x0 = 0;
   setup->framebuffer.y0 = 0;
   setup->framebuffer.x1 = fb->width-1;
   setup->framebuffer.y1 = fb->height-1;
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}


/*
 * Try to clear one color buffer of the attached fb, either by binning a clear
 * command or queuing up the clear for later (when binning is started).
 */
static boolean
lp_setup_try_clear_color_buffer(struct lp_setup_context *setup,
                                const union pipe_color_union *color,
                                unsigned cbuf)
{
   union lp_rast_cmd_arg clearrb_arg;
   union util_color uc;
   enum pipe_format format = setup->fb.cbufs[cbuf]->format;

   LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);

   if (util_format_is_pure_integer(format)) {
      /*
       * We expect int/uint clear values here, though some APIs
       * might disagree (but in any case util_pack_color()
       * couldn't handle it)...
       */
      if (util_format_is_pure_sint(format)) {
         util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
      }
      else {
         assert(util_format_is_pure_uint(format));
         util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
      }
   }
   else {
      util_pack_color(color->f, format, &uc);
   }

   if (setup->state == SETUP_ACTIVE) {
      struct lp_scene *scene = setup->scene;

      /* Add the clear to existing scene.  In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      struct lp_rast_clear_rb *cc_scene =
         (struct lp_rast_clear_rb *)
            lp_scene_alloc_aligned(scene, sizeof(struct lp_rast_clear_rb), 8);

      if (!cc_scene) {
         return FALSE;
      }

      cc_scene->cbuf = cbuf;
      cc_scene->color_val = uc;
      clearrb_arg.clear_rb = cc_scene;

      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_COLOR,
                                   clearrb_arg))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or state-tracker might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      setup->clear.flags |= 1 << (cbuf + 2);
      setup->clear.color_val[cbuf] = uc;
   }

   return TRUE;
}

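/*
 * Try to clear the depth/stencil buffer, either by binning a clear command
 * or by accumulating the packed clear value/mask for later (when binning
 * is started).  A mask is kept so that depth-only or stencil-only clears
 * of a combined buffer can be applied as a masked write.
 */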
static boolean
lp_setup_try_clear_zs(struct lp_setup_context *setup,
                      double depth,
                      unsigned stencil,
                      unsigned flags)
{
   uint64_t zsmask = 0;
   uint64_t zsvalue = 0;
   uint32_t zmask32;
   uint8_t smask8;
   enum pipe_format format = setup->fb.zsbuf->format;

   LP_DBG(DEBUG_SETUP, "%s state %d\n", __FUNCTION__, setup->state);

   zmask32 = (flags & PIPE_CLEAR_DEPTH) ? ~0 : 0;
   smask8 = (flags & PIPE_CLEAR_STENCIL) ? ~0 : 0;

   zsvalue = util_pack64_z_stencil(format, depth, stencil);

   zsmask = util_pack64_mask_z_stencil(format, zmask32, smask8);

   zsvalue &= zsmask;

   if (format == PIPE_FORMAT_Z24X8_UNORM ||
       format == PIPE_FORMAT_X8Z24_UNORM) {
      /*
       * Make full mask if there's "X" bits so we can do full
       * clear (without rmw).
       */
      uint32_t zsmask_full = 0;
      zsmask_full = util_pack_mask_z_stencil(format, ~0, ~0);
      zsmask |= ~zsmask_full;
   }
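
   /*
    * (Example of the above, assuming gallium's usual packed layouts: for
    * PIPE_FORMAT_Z24X8_UNORM the depth mask only covers the low 24 bits,
    * so this ORs in the top 8 "X" bits and the clear can write whole
    * 32-bit words instead of doing a read-modify-write.)
    */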

   if (setup->state == SETUP_ACTIVE) {
      struct lp_scene *scene = setup->scene;

      /* Add the clear to existing scene.  In the unusual case where
       * both color and depth-stencil are being cleared when there's
       * already been some rendering, we could discard the currently
       * binned scene and start again, but I don't see that as being
       * a common usage.
       */
      if (!lp_scene_bin_everywhere(scene,
                                   LP_RAST_OP_CLEAR_ZSTENCIL,
                                   lp_rast_arg_clearzs(zsvalue, zsmask)))
         return FALSE;
   }
   else {
      /* Put ourselves into the 'pre-clear' state, specifically to try
       * and accumulate multiple clears to color and depth_stencil
       * buffers which the app or state-tracker might issue
       * separately.
       */
      set_scene_state( setup, SETUP_CLEARED, __FUNCTION__ );

      setup->clear.flags |= flags;

      setup->clear.zsmask |= zsmask;
      setup->clear.zsvalue =
         (setup->clear.zsvalue & ~zsmask) | (zsvalue & zsmask);
   }

   return TRUE;
}

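/**
 * Record color/depth/stencil clears (presumably reached via the context's
 * clear entrypoint).  Each individual clear is retried once after a flush
 * if binning it fails for lack of scene space.
 */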
void
lp_setup_clear( struct lp_setup_context *setup,
                const union pipe_color_union *color,
                double depth,
                unsigned stencil,
                unsigned flags )
{
   unsigned i;

   /*
    * Note that any of these (at most 9) clears could fail, though at most
    * one failure should occur.  Retrying only the failed clear avoids
    * redoing the clears that already succeeded (we may still clear some
    * tiles twice if a clear command succeeded only partially for one
    * buffer).
    */
   if (flags & PIPE_CLEAR_DEPTHSTENCIL) {
      unsigned flagszs = flags & PIPE_CLEAR_DEPTHSTENCIL;
      if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs)) {
         lp_setup_flush(setup, NULL, __FUNCTION__);

         if (!lp_setup_try_clear_zs(setup, depth, stencil, flagszs))
            assert(0);
      }
   }

   if (flags & PIPE_CLEAR_COLOR) {
      assert(PIPE_CLEAR_COLOR0 == (1 << 2));
      for (i = 0; i < setup->fb.nr_cbufs; i++) {
         if ((flags & (1 << (2 + i))) && setup->fb.cbufs[i]) {
            if (!lp_setup_try_clear_color_buffer(setup, color, i)) {
               lp_setup_flush(setup, NULL, __FUNCTION__);

               if (!lp_setup_try_clear_color_buffer(setup, color, i))
                  assert(0);
            }
         }
      }
   }
}


void
lp_setup_set_triangle_state( struct lp_setup_context *setup,
                             unsigned cull_mode,
                             boolean ccw_is_frontface,
                             boolean scissor,
                             boolean half_pixel_center,
                             boolean bottom_edge_rule)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->ccw_is_frontface = ccw_is_frontface;
   setup->cullmode = cull_mode;
   setup->triangle = first_triangle;
   setup->pixel_offset = half_pixel_center ? 0.5f : 0.0f;
   setup->bottom_edge_rule = bottom_edge_rule;

   if (setup->scissor_test != scissor) {
      setup->dirty |= LP_SETUP_NEW_SCISSOR;
      setup->scissor_test = scissor;
   }
}

void
lp_setup_set_line_state( struct lp_setup_context *setup,
                         float line_width)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->line_width = line_width;
}

void
lp_setup_set_point_state( struct lp_setup_context *setup,
                          float point_size,
                          boolean point_size_per_vertex,
                          uint sprite_coord_enable,
                          uint sprite_coord_origin)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->point_size = point_size;
   setup->sprite_coord_enable = sprite_coord_enable;
   setup->sprite_coord_origin = sprite_coord_origin;
   setup->point_size_per_vertex = point_size_per_vertex;
}

void
lp_setup_set_setup_variant( struct lp_setup_context *setup,
                            const struct lp_setup_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   setup->setup.variant = variant;
}

void
lp_setup_set_fs_variant( struct lp_setup_context *setup,
                         struct lp_fragment_shader_variant *variant)
{
   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__,
          variant);
   /* FIXME: reference count */

   setup->fs.current.variant = variant;
   setup->dirty |= LP_SETUP_NEW_FS;
}

void
lp_setup_set_fs_constants(struct lp_setup_context *setup,
                          unsigned num,
                          struct pipe_constant_buffer *buffers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);

   assert(num <= ARRAY_SIZE(setup->constants));

   for (i = 0; i < num; ++i) {
      util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]);
   }
   for (; i < ARRAY_SIZE(setup->constants); i++) {
      util_copy_constant_buffer(&setup->constants[i].current, NULL);
   }
   setup->dirty |= LP_SETUP_NEW_CONSTANTS;
}


void
lp_setup_set_alpha_ref_value( struct lp_setup_context *setup,
                              float alpha_ref_value )
{
   LP_DBG(DEBUG_SETUP, "%s %f\n", __FUNCTION__, alpha_ref_value);

   if(setup->fs.current.jit_context.alpha_ref_value != alpha_ref_value) {
      setup->fs.current.jit_context.alpha_ref_value = alpha_ref_value;
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

void
lp_setup_set_stencil_ref_values( struct lp_setup_context *setup,
                                 const ubyte refs[2] )
{
   LP_DBG(DEBUG_SETUP, "%s %d %d\n", __FUNCTION__, refs[0], refs[1]);

   if (setup->fs.current.jit_context.stencil_ref_front != refs[0] ||
       setup->fs.current.jit_context.stencil_ref_back != refs[1]) {
      setup->fs.current.jit_context.stencil_ref_front = refs[0];
      setup->fs.current.jit_context.stencil_ref_back = refs[1];
      setup->dirty |= LP_SETUP_NEW_FS;
   }
}

void
lp_setup_set_blend_color( struct lp_setup_context *setup,
                          const struct pipe_blend_color *blend_color )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(blend_color);

   if(memcmp(&setup->blend_color.current, blend_color, sizeof *blend_color) != 0) {
      memcpy(&setup->blend_color.current, blend_color, sizeof *blend_color);
      setup->dirty |= LP_SETUP_NEW_BLEND_COLOR;
   }
}


void
lp_setup_set_scissors( struct lp_setup_context *setup,
                       const struct pipe_scissor_state *scissors )
{
   unsigned i;
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(scissors);

   for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
      setup->scissors[i].x0 = scissors[i].minx;
      setup->scissors[i].x1 = scissors[i].maxx-1;
      setup->scissors[i].y0 = scissors[i].miny;
      setup->scissors[i].y1 = scissors[i].maxy-1;
   }
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}


void
lp_setup_set_flatshade_first( struct lp_setup_context *setup,
                              boolean flatshade_first )
{
   setup->flatshade_first = flatshade_first;
}

void
lp_setup_set_rasterizer_discard( struct lp_setup_context *setup,
                                 boolean rasterizer_discard )
{
   if (setup->rasterizer_discard != rasterizer_discard) {
      setup->rasterizer_discard = rasterizer_discard;
      set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );
   }
}

void
lp_setup_set_vertex_info( struct lp_setup_context *setup,
                          struct vertex_info *vertex_info )
{
   /* XXX: just silently holding onto the pointer:
    */
   setup->vertex_info = vertex_info;
}


/**
 * Called during state validation when LP_NEW_VIEWPORT is set.
 */
void
lp_setup_set_viewports(struct lp_setup_context *setup,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewports)
{
   struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num_viewports <= PIPE_MAX_VIEWPORTS);
   assert(viewports);

   /*
    * For use in lp_state_fs.c, propagate the viewport values for all viewports.
    */
   for (i = 0; i < num_viewports; i++) {
      float min_depth;
      float max_depth;
      util_viewport_zmin_zmax(&viewports[i], lp->rasterizer->clip_halfz,
                              &min_depth, &max_depth);

      if (setup->viewports[i].min_depth != min_depth ||
          setup->viewports[i].max_depth != max_depth) {
         setup->viewports[i].min_depth = min_depth;
         setup->viewports[i].max_depth = max_depth;
         setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
      }
   }
}


/**
 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
 */
void
lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_view **views)
{
   unsigned i, max_tex_num;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   max_tex_num = MAX2(num, setup->fs.current_tex_num);

   for (i = 0; i < max_tex_num; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &setup->fs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&setup->fs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - setup array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
               jit_tex->base = lp_tex->data;
            }

            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;

               if (llvmpipe_resource_is_texture(res)) {
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures, we don't have a first_layer;
                      * instead we adjust last_layer (stored as depth) plus
                      * the mip level offsets (since the layout is mip-first
                      * we can't just adjust the base pointer).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have an "offset"; instead we adjust
                   * the size (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.size / view_blocksize;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.offset + view->u.buf.size <= res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                      PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            assert(jit_tex->base);
         }
      }
      else {
         pipe_resource_reference(&setup->fs.current_tex[i], NULL);
      }
   }
   setup->fs.current_tex_num = num;

   setup->dirty |= LP_SETUP_NEW_FS;
}


/**
 * Called during state validation when LP_NEW_SAMPLER is set.
 */
void
lp_setup_set_fragment_sampler_state(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_state **samplers)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SAMPLERS);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;

      if (sampler) {
         struct lp_jit_sampler *jit_sam;
         jit_sam = &setup->fs.current.jit_context.samplers[i];

         jit_sam->min_lod = sampler->min_lod;
         jit_sam->max_lod = sampler->max_lod;
         jit_sam->lod_bias = sampler->lod_bias;
         COPY_4V(jit_sam->border_color, sampler->border_color.f);
      }
   }

   setup->dirty |= LP_SETUP_NEW_FS;
}


/**
 * Is the given texture referenced by any scene?
 * Note: we have to check all scenes including any scenes currently
 * being rendered and the current scene being built.
 */
unsigned
lp_setup_is_resource_referenced( const struct lp_setup_context *setup,
                                 const struct pipe_resource *texture )
{
   unsigned i;

   /* check the render targets */
   for (i = 0; i < setup->fb.nr_cbufs; i++) {
      if (setup->fb.cbufs[i] && setup->fb.cbufs[i]->texture == texture)
         return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }
   if (setup->fb.zsbuf && setup->fb.zsbuf->texture == texture) {
      return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   /* check textures referenced by the scene */
   for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
      if (lp_scene_is_resource_referenced(setup->scenes[i], texture)) {
         return LP_REFERENCED_FOR_READ;
      }
   }

   return LP_UNREFERENCED;
}


/**
 * Called by vbuf code when we're about to draw something.
 *
 * This function stores all dirty state in the current scene's display list
 * memory, via lp_scene_alloc().  We cannot pass pointers to mutable state to
 * the JIT functions, as the JIT functions will be called later on, most likely
 * on a different thread.
 *
 * When processing dirty state it is imperative that we don't refer to any
 * pointers previously allocated with lp_scene_alloc() in this function (or any
 * function) as they may belong to a scene freed since then.
 */
static boolean
try_update_scene_state( struct lp_setup_context *setup )
{
   static const float fake_const_buf[4];
   boolean new_scene = (setup->fs.stored == NULL);
   struct lp_scene *scene = setup->scene;
   unsigned i;

   assert(scene);

   if (setup->dirty & LP_SETUP_NEW_VIEWPORTS) {
      /*
       * Record new depth range state for changes due to viewport updates.
       *
       * TODO: Collapse the existing viewport and depth range information
       * into one structure, for access by JIT.
       */
      struct lp_jit_viewport *stored;

      stored = (struct lp_jit_viewport *)
         lp_scene_alloc(scene, sizeof setup->viewports);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      memcpy(stored, setup->viewports, sizeof setup->viewports);

      setup->fs.current.jit_context.viewports = stored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   if(setup->dirty & LP_SETUP_NEW_BLEND_COLOR) {
      uint8_t *stored;
      float* fstored;
      unsigned i, j;
      unsigned size;

      /* Alloc u8_blend_color (16 x i8) and f_blend_color (4 or 8 x f32) */
      size = 4 * 16 * sizeof(uint8_t);
      size += (LP_MAX_VECTOR_LENGTH / 4) * sizeof(float);
      stored = lp_scene_alloc_aligned(scene, size, LP_MIN_VECTOR_ALIGN);

      if (!stored) {
         assert(!new_scene);
         return FALSE;
      }

      /* Store floating point colour */
      fstored = (float*)(stored + 4*16);
      for (i = 0; i < (LP_MAX_VECTOR_LENGTH / 4); ++i) {
         fstored[i] = setup->blend_color.current.color[i % 4];
      }

      /* smear each blend color component across 16 ubyte elements */
      for (i = 0; i < 4; ++i) {
         uint8_t c = float_to_ubyte(setup->blend_color.current.color[i]);
         for (j = 0; j < 16; ++j)
            stored[i*16 + j] = c;
      }
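
      /*
       * Resulting layout (from the two loops above): 16 copies of R, then
       * 16 of G, 16 of B and 16 of A as ubytes, followed by the float color
       * replicated across LP_MAX_VECTOR_LENGTH/4 floats.  E.g. for blend
       * color (1, 0, 0, 1) the ubyte block is 16*0xff, 32*0x00, 16*0xff.
       */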

      setup->blend_color.stored = stored;
      setup->fs.current.jit_context.u8_blend_color = stored;
      setup->fs.current.jit_context.f_blend_color = fstored;
      setup->dirty |= LP_SETUP_NEW_FS;
   }

   if (setup->dirty & LP_SETUP_NEW_CONSTANTS) {
      for (i = 0; i < ARRAY_SIZE(setup->constants); ++i) {
         struct pipe_resource *buffer = setup->constants[i].current.buffer;
         const unsigned current_size = MIN2(setup->constants[i].current.buffer_size,
                                            LP_MAX_TGSI_CONST_BUFFER_SIZE);
         const ubyte *current_data = NULL;
         int num_constants;

         STATIC_ASSERT(DATA_BLOCK_SIZE >= LP_MAX_TGSI_CONST_BUFFER_SIZE);

         if (buffer) {
            /* resource buffer */
            current_data = (ubyte *) llvmpipe_resource_data(buffer);
         }
         else if (setup->constants[i].current.user_buffer) {
            /* user-space buffer */
            current_data = (ubyte *) setup->constants[i].current.user_buffer;
         }

         if (current_data) {
            current_data += setup->constants[i].current.buffer_offset;

            /* TODO: copy only the actually used constants? */

            if (setup->constants[i].stored_size != current_size ||
                !setup->constants[i].stored_data ||
                memcmp(setup->constants[i].stored_data,
                       current_data,
                       current_size) != 0) {
               void *stored;

               stored = lp_scene_alloc(scene, current_size);
               if (!stored) {
                  assert(!new_scene);
                  return FALSE;
               }

               memcpy(stored,
                      current_data,
                      current_size);
               setup->constants[i].stored_size = current_size;
               setup->constants[i].stored_data = stored;
            }
            setup->fs.current.jit_context.constants[i] =
               setup->constants[i].stored_data;
         }
         else {
            setup->constants[i].stored_size = 0;
            setup->constants[i].stored_data = NULL;
            setup->fs.current.jit_context.constants[i] = fake_const_buf;
         }

         num_constants =
            setup->constants[i].stored_size / (sizeof(float) * 4);
         setup->fs.current.jit_context.num_constants[i] = num_constants;
         setup->dirty |= LP_SETUP_NEW_FS;
      }
   }


   if (setup->dirty & LP_SETUP_NEW_FS) {
      if (!setup->fs.stored ||
          memcmp(setup->fs.stored,
                 &setup->fs.current,
                 sizeof setup->fs.current) != 0)
      {
         struct lp_rast_state *stored;

         /* The fs state that's been stored in the scene is different from
          * the new, current state.  So allocate a new lp_rast_state object
          * and append it to the bin's setup data buffer.
          */
         stored = (struct lp_rast_state *) lp_scene_alloc(scene, sizeof *stored);
         if (!stored) {
            assert(!new_scene);
            return FALSE;
         }

         memcpy(stored,
                &setup->fs.current,
                sizeof setup->fs.current);
         setup->fs.stored = stored;

         /* The scene now references the textures in the rasterization
          * state record.  Record that reference now.
          */
         for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
            if (setup->fs.current_tex[i]) {
               if (!lp_scene_add_resource_reference(scene,
                                                    setup->fs.current_tex[i],
                                                    new_scene)) {
                  assert(!new_scene);
                  return FALSE;
               }
            }
         }
      }
   }

   if (setup->dirty & LP_SETUP_NEW_SCISSOR) {
      unsigned i;
      for (i = 0; i < PIPE_MAX_VIEWPORTS; ++i) {
         setup->draw_regions[i] = setup->framebuffer;
         if (setup->scissor_test) {
            u_rect_possible_intersection(&setup->scissors[i],
                                         &setup->draw_regions[i]);
         }
      }
   }

   setup->dirty = 0;

   assert(setup->fs.stored);
   return TRUE;
}

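/**
 * Pull any state the draw module may have changed into the setup context,
 * make sure a scene is active (when update_scene is TRUE), and bake the
 * current state into that scene, flushing and restarting it if the scene
 * has run out of memory.
 */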
boolean
lp_setup_update_state( struct lp_setup_context *setup,
                       boolean update_scene )
{
   /* Some of the 'draw' pipeline stages may have changed some driver state.
    * Make sure we've processed those state changes before anything else.
    *
    * XXX this is the only place where llvmpipe_context is used in the
    * setup code.  This may get refactored/changed...
    */
   {
      struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
      if (lp->dirty) {
         llvmpipe_update_derived(lp);
      }

      if (lp->setup->dirty) {
         llvmpipe_update_setup(lp);
      }

      assert(setup->setup.variant);

      /* Will probably need to move this somewhere else, just need
       * to know about the vertex shader point size attribute.
       */
      setup->psize_slot = lp->psize_slot;
      setup->viewport_index_slot = lp->viewport_index_slot;
      setup->layer_slot = lp->layer_slot;
      setup->face_slot = lp->face_slot;

      assert(lp->dirty == 0);

      assert(lp->setup_variant.key.size ==
             setup->setup.variant->key.size);

      assert(memcmp(&lp->setup_variant.key,
                    &setup->setup.variant->key,
                    setup->setup.variant->key.size) == 0);
   }

   if (update_scene && setup->state != SETUP_ACTIVE) {
      if (!set_scene_state( setup, SETUP_ACTIVE, __FUNCTION__ ))
         return FALSE;
   }

   /* Only call into try_update_scene_state() if we already have a
    * scene:
    */
   if (update_scene && setup->scene) {
      assert(setup->state == SETUP_ACTIVE);

      if (try_update_scene_state(setup))
         return TRUE;

      /* Update failed, try to restart the scene.
       *
       * Cannot call lp_setup_flush_and_restart() directly here
       * because of potential recursion.
       */
      if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
         return FALSE;

      if (!set_scene_state(setup, SETUP_ACTIVE, __FUNCTION__))
         return FALSE;

      if (!setup->scene)
         return FALSE;

      return try_update_scene_state(setup);
   }

   return TRUE;
}


/* Only caller is lp_setup_vbuf_destroy()
 */
void
lp_setup_destroy( struct lp_setup_context *setup )
{
   uint i;

   lp_setup_reset( setup );

   util_unreference_framebuffer_state(&setup->fb);

   for (i = 0; i < ARRAY_SIZE(setup->fs.current_tex); i++) {
      pipe_resource_reference(&setup->fs.current_tex[i], NULL);
   }

   for (i = 0; i < ARRAY_SIZE(setup->constants); i++) {
      pipe_resource_reference(&setup->constants[i].current.buffer, NULL);
   }

   /* free the scenes in the 'empty' queue */
   for (i = 0; i < ARRAY_SIZE(setup->scenes); i++) {
      struct lp_scene *scene = setup->scenes[i];

      if (scene->fence)
         lp_fence_wait(scene->fence);

      lp_scene_destroy(scene);
   }

   lp_fence_reference(&setup->last_fence, NULL);

   FREE( setup );
}


/**
 * Create a new primitive tiling engine.  Plug it into the backend of
 * the draw module.  Currently also creates a rasterizer to use with
 * it.
 */
struct lp_setup_context *
lp_setup_create( struct pipe_context *pipe,
                 struct draw_context *draw )
{
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_setup_context *setup;
   unsigned i;

   setup = CALLOC_STRUCT(lp_setup_context);
   if (!setup) {
      goto no_setup;
   }

   lp_setup_init_vbuf(setup);

   /* Used only in update_state():
    */
   setup->pipe = pipe;


   setup->num_threads = screen->num_threads;
   setup->vbuf = draw_vbuf_stage(draw, &setup->base);
   if (!setup->vbuf) {
      goto no_vbuf;
   }

   draw_set_rasterize_stage(draw, setup->vbuf);
   draw_set_render(draw, &setup->base);

   /* create some empty scenes */
   for (i = 0; i < MAX_SCENES; i++) {
      setup->scenes[i] = lp_scene_create( pipe );
      if (!setup->scenes[i]) {
         goto no_scenes;
      }
   }

   setup->triangle = first_triangle;
   setup->line = first_line;
   setup->point = first_point;

   setup->dirty = ~0;

   /* Initialize empty default fb correctly, so the rect is empty */
   setup->framebuffer.x1 = -1;
   setup->framebuffer.y1 = -1;

   return setup;

no_scenes:
   for (i = 0; i < MAX_SCENES; i++) {
      if (setup->scenes[i]) {
         lp_scene_destroy(setup->scenes[i]);
      }
   }

   setup->vbuf->destroy(setup->vbuf);
no_vbuf:
   FREE(setup);
no_setup:
   return NULL;
}


/**
 * Put a BeginQuery command into all bins.
 */
void
lp_setup_begin_query(struct lp_setup_context *setup,
                     struct llvmpipe_query *pq)
{

   set_scene_state(setup, SETUP_ACTIVE, "begin_query");

   if (!(pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
         pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
         pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
         pq->type == PIPE_QUERY_PIPELINE_STATISTICS))
      return;

   /* init the query to its beginning state */
   assert(setup->active_binned_queries < LP_MAX_ACTIVE_BINNED_QUERIES);
   /* exceeding list size so just ignore the query */
   if (setup->active_binned_queries >= LP_MAX_ACTIVE_BINNED_QUERIES) {
      return;
   }
   assert(setup->active_queries[setup->active_binned_queries] == NULL);
   setup->active_queries[setup->active_binned_queries] = pq;
   setup->active_binned_queries++;

   assert(setup->scene);
   if (setup->scene) {
      if (!lp_scene_bin_everywhere(setup->scene,
                                   LP_RAST_OP_BEGIN_QUERY,
                                   lp_rast_arg_query(pq))) {

         if (!lp_setup_flush_and_restart(setup))
            return;

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_BEGIN_QUERY,
                                      lp_rast_arg_query(pq))) {
            return;
         }
      }
      setup->scene->had_queries |= TRUE;
   }
}


/**
 * Put an EndQuery command into all bins.
 */
void
lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
{
   set_scene_state(setup, SETUP_ACTIVE, "end_query");

   assert(setup->scene);
   if (setup->scene) {
      /* pq->fence should be the fence of the *last* scene which
       * contributed to the query result.
       */
      lp_fence_reference(&pq->fence, setup->scene->fence);

      if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
          pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
          pq->type == PIPE_QUERY_TIMESTAMP) {
         if (pq->type == PIPE_QUERY_TIMESTAMP &&
             !(setup->scene->tiles_x | setup->scene->tiles_y)) {
            /*
             * If there's a zero width/height framebuffer, there are no bins
             * and hence no rast task is ever run.  So fill in something here
             * instead.
             */
            pq->end[0] = os_time_get_nano();
         }

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_END_QUERY,
                                      lp_rast_arg_query(pq))) {
            if (!lp_setup_flush_and_restart(setup))
               goto fail;

            if (!lp_scene_bin_everywhere(setup->scene,
                                         LP_RAST_OP_END_QUERY,
                                         lp_rast_arg_query(pq))) {
               goto fail;
            }
         }
         setup->scene->had_queries |= TRUE;
      }
   }
   else {
      lp_fence_reference(&pq->fence, setup->last_fence);
   }

fail:
   /* Need to do this now and not earlier, since the query still needs to
    * be marked active while binning it above (which may cause a flush).
    */
   if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
       pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       pq->type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE ||
       pq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
      unsigned i;

      /* remove from active binned query list */
      for (i = 0; i < setup->active_binned_queries; i++) {
         if (setup->active_queries[i] == pq)
            break;
      }
      assert(i < setup->active_binned_queries);
      if (i == setup->active_binned_queries)
         return;
      setup->active_binned_queries--;
      setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
      setup->active_queries[setup->active_binned_queries] = NULL;
   }
}


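/**
 * Flush the current scene and immediately start a new one with the same
 * state.  Used when binning a command fails because the scene has run out
 * of memory.
 */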
boolean
lp_setup_flush_and_restart(struct lp_setup_context *setup)
{
   if (0) debug_printf("%s\n", __FUNCTION__);

   assert(setup->state == SETUP_ACTIVE);

   if (!set_scene_state(setup, SETUP_FLUSHED, __FUNCTION__))
      return FALSE;

   if (!lp_setup_update_state(setup, TRUE))
      return FALSE;

   return TRUE;
}
