/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_framebuffer.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/reallocarray.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "lp_scene.h"
#include "lp_fence.h"
#include "lp_debug.h"
#include "lp_context.h"
#include "lp_state_fs.h"
#include "lp_setup_context.h"

#define RESOURCE_REF_SZ 32
/** List of resource references */
struct resource_ref {
   struct pipe_resource *resource[RESOURCE_REF_SZ];
   int count;
   struct resource_ref *next;
};


#define SHADER_REF_SZ 32
/** List of shader variant references */
struct shader_ref {
   struct lp_fragment_shader_variant *variant[SHADER_REF_SZ];
   int count;
   struct shader_ref *next;
};
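
/*
 * Both lists above are singly linked chains of fixed-size blocks: each block
 * holds up to 32 pointers plus a count, so adding a reference rarely needs a
 * fresh allocation and lookup is a short linear scan.  A minimal traversal
 * sketch (the same pattern the functions below use; use_resource() is a
 * hypothetical helper):
 *
 *    for (struct resource_ref *ref = scene->resources; ref; ref = ref->next)
 *       for (int i = 0; i < ref->count; i++)
 *          use_resource(ref->resource[i]);
 */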


/**
 * Create a new scene object.
 * \param setup  the setup context from whose slab the scene is allocated
 */
struct lp_scene *
lp_scene_create(struct lp_setup_context *setup)
{
   struct lp_scene *scene = slab_alloc_st(&setup->scene_slab);
   if (!scene)
      return NULL;

   memset(scene, 0, sizeof(struct lp_scene));
   scene->pipe = setup->pipe;
   scene->setup = setup;
   scene->data.head = &scene->data.first;

   (void) mtx_init(&scene->mutex, mtx_plain);

#if MESA_DEBUG
   /* Do some scene limit sanity checks here */
   {
      size_t maxBins = TILES_X * TILES_Y;
      size_t maxCommandBytes = sizeof(struct cmd_block) * maxBins;
      size_t maxCommandPlusData = maxCommandBytes + DATA_BLOCK_SIZE;
      /* We'll need at least one command block per bin.  Make sure that's
       * less than the max allowed scene size.
       */
      assert(maxCommandBytes < LP_SCENE_MAX_SIZE);
      /* We'll also need space for at least one other data block */
      assert(maxCommandPlusData <= LP_SCENE_MAX_SIZE);
   }
#endif

   return scene;
}
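
/*
 * Typical scene lifecycle, pieced together from the functions in this file
 * (a sketch, not a literal caller -- the real driving code lives in the
 * setup/rasterizer modules):
 *
 *    struct lp_scene *scene = lp_scene_create(setup);
 *    lp_scene_begin_binning(scene, &fb);
 *    ... bin commands, add resource/shader references ...
 *    lp_scene_end_binning(scene);
 *    lp_scene_begin_rasterization(scene);
 *    ... rasterizer threads drain bins via lp_scene_bin_iter_next() ...
 *    lp_scene_end_rasterization(scene);
 *    lp_scene_destroy(scene);              // or reuse the scene
 */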


/**
 * Free all data associated with the given scene, and the scene itself.
 */
void
lp_scene_destroy(struct lp_scene *scene)
{
   lp_scene_end_rasterization(scene);
   mtx_destroy(&scene->mutex);
   free(scene->tiles);
   assert(scene->data.head == &scene->data.first);
   slab_free_st(&scene->setup->scene_slab, scene);
}


/**
 * Check if the scene's bins are all empty.
 * For debugging purposes.
 */
bool
lp_scene_is_empty(struct lp_scene *scene)
{
   for (unsigned y = 0; y < scene->tiles_y; y++) {
      for (unsigned x = 0; x < scene->tiles_x; x++) {
         const struct cmd_bin *bin = lp_scene_get_bin(scene, x, y);
         if (bin->head) {
            return false;
         }
      }
   }
   return true;
}


/* Returns true if there has ever been a failed allocation attempt in
 * this scene.  Used in triangle/rectangle emit to avoid having to
 * check success at each bin.
 */
bool
lp_scene_is_oom(struct lp_scene *scene)
{
   return scene->alloc_failed;
}


/* Remove all commands from a bin.  The bin's last command block is kept
 * so its memory can be reused.
 */
void
lp_scene_bin_reset(struct lp_scene *scene, unsigned x, unsigned y)
{
   struct cmd_bin *bin = lp_scene_get_bin(scene, x, y);

   bin->last_state = NULL;
   bin->head = bin->tail;
   if (bin->tail) {
      bin->tail->next = NULL;
      bin->tail->count = 0;
   }
}


static void
init_scene_texture(struct lp_scene_surface *ssurf, struct pipe_surface *psurf)
{
   if (!psurf) {
      ssurf->stride = 0;
      ssurf->layer_stride = 0;
      ssurf->sample_stride = 0;
      ssurf->nr_samples = 0;
      ssurf->map = NULL;
      return;
   }

   if (llvmpipe_resource_is_texture(psurf->texture)) {
      ssurf->stride = llvmpipe_resource_stride(psurf->texture,
                                               psurf->u.tex.level);
      ssurf->layer_stride = llvmpipe_layer_stride(psurf->texture,
                                                  psurf->u.tex.level);
      ssurf->sample_stride = llvmpipe_sample_stride(psurf->texture);

      ssurf->map = llvmpipe_resource_map(psurf->texture,
                                         psurf->u.tex.level,
                                         psurf->u.tex.first_layer,
                                         LP_TEX_USAGE_READ_WRITE);
      assert(ssurf->map);
      ssurf->format_bytes = util_format_get_blocksize(psurf->format);
      ssurf->nr_samples = util_res_sample_count(psurf->texture);
      ssurf->base_layer = psurf->u.tex.first_layer;
      ssurf->layer_count = psurf->u.tex.last_layer - psurf->u.tex.first_layer + 1;
   } else {
      struct llvmpipe_resource *lpr = llvmpipe_resource(psurf->texture);
      unsigned pixstride = util_format_get_blocksize(psurf->format);
      ssurf->stride = psurf->texture->width0;
      ssurf->layer_stride = 0;
      ssurf->sample_stride = 0;
      ssurf->nr_samples = 1;
      ssurf->map = lpr->data;
      ssurf->map += psurf->u.buf.first_element * pixstride;
      ssurf->format_bytes = util_format_get_blocksize(psurf->format);
   }
}


void
lp_scene_begin_rasterization(struct lp_scene *scene)
{
   const struct pipe_framebuffer_state *fb = &scene->fb;

   //LP_DBG(DEBUG_RAST, "%s\n", __func__);

   for (unsigned i = 0; i < scene->fb.nr_cbufs; i++) {
      struct pipe_surface *cbuf = scene->fb.cbufs[i];
      init_scene_texture(&scene->cbufs[i], cbuf);
   }

   if (fb->zsbuf) {
      struct pipe_surface *zsbuf = scene->fb.zsbuf;
      init_scene_texture(&scene->zsbuf, zsbuf);
   }
}


/**
 * Free all the temporary data in a scene.
 */
void
lp_scene_end_rasterization(struct lp_scene *scene)
{
   mtx_lock(&scene->mutex);

   /* Unmap color buffers */
   for (unsigned i = 0; i < scene->fb.nr_cbufs; i++) {
      if (scene->cbufs[i].map) {
         struct pipe_surface *cbuf = scene->fb.cbufs[i];
         if (llvmpipe_resource_is_texture(cbuf->texture)) {
            llvmpipe_resource_unmap(cbuf->texture,
                                    cbuf->u.tex.level,
                                    cbuf->u.tex.first_layer);
         }
         scene->cbufs[i].map = NULL;
      }
   }

   /* Unmap z/stencil buffer */
   if (scene->zsbuf.map) {
      struct pipe_surface *zsbuf = scene->fb.zsbuf;
      llvmpipe_resource_unmap(zsbuf->texture,
                              zsbuf->u.tex.level,
                              zsbuf->u.tex.first_layer);
      scene->zsbuf.map = NULL;
   }

   /* Reset all command lists:
    */
   memset(scene->tiles, 0, sizeof(struct cmd_bin) * scene->num_alloced_tiles);

   /* Decrement texture ref counts
    */
   int j = 0;
   for (struct resource_ref *ref = scene->resources; ref; ref = ref->next) {
      for (int i = 0; i < ref->count; i++) {
         if (LP_DEBUG & DEBUG_SETUP)
            debug_printf("resource %d: %p %dx%d sz %d\n",
                         j,
                         (void *) ref->resource[i],
                         ref->resource[i]->width0,
                         ref->resource[i]->height0,
                         llvmpipe_resource_size(ref->resource[i]));
         j++;
         llvmpipe_resource_unmap(ref->resource[i], 0, 0);
         pipe_resource_reference(&ref->resource[i], NULL);
      }
   }

   for (struct resource_ref *ref = scene->writeable_resources; ref;
        ref = ref->next) {
      for (int i = 0; i < ref->count; i++) {
         if (LP_DEBUG & DEBUG_SETUP)
            debug_printf("resource %d: %p %dx%d sz %d\n",
                         j,
                         (void *) ref->resource[i],
                         ref->resource[i]->width0,
                         ref->resource[i]->height0,
                         llvmpipe_resource_size(ref->resource[i]));
         j++;
         llvmpipe_resource_unmap(ref->resource[i], 0, 0);
         pipe_resource_reference(&ref->resource[i], NULL);
      }
   }

   if (LP_DEBUG & DEBUG_SETUP) {
      debug_printf("scene %d resources, sz %d\n",
                   j, scene->resource_reference_size);
   }

   /* Decrement shader variant ref counts
    */
   j = 0;
   for (struct shader_ref *ref = scene->frag_shaders; ref; ref = ref->next) {
      for (int i = 0; i < ref->count; i++) {
         if (LP_DEBUG & DEBUG_SETUP)
            debug_printf("shader %d: %p\n", j, (void *) ref->variant[i]);
         j++;
         lp_fs_variant_reference(llvmpipe_context(scene->pipe),
                                 &ref->variant[i], NULL);
      }
   }

   /* Free all scene data blocks:
    */
   {
      struct data_block_list *list = &scene->data;
      struct data_block *block, *tmp;

      for (block = list->head; block; block = tmp) {
         tmp = block->next;
         if (block != &list->first)
            FREE(block);
      }

      list->head = &list->first;
      list->head->next = NULL;
   }

   lp_fence_reference(&scene->fence, NULL);

   scene->resources = NULL;
   scene->writeable_resources = NULL;
   scene->frag_shaders = NULL;
   scene->scene_size = 0;
   scene->resource_reference_size = 0;

   scene->alloc_failed = false;

   util_unreference_framebuffer_state(&scene->fb);

   mtx_unlock(&scene->mutex);
}


struct cmd_block *
lp_scene_new_cmd_block(struct lp_scene *scene,
                       struct cmd_bin *bin)
{
   struct cmd_block *block = lp_scene_alloc(scene, sizeof(struct cmd_block));
   if (block) {
      if (bin->tail) {
         bin->tail->next = block;
         bin->tail = block;
      } else {
         bin->head = block;
         bin->tail = block;
      }
      //memset(block, 0, sizeof *block);
      block->next = NULL;
      block->count = 0;
   }
   return block;
}


struct data_block *
lp_scene_new_data_block(struct lp_scene *scene)
{
   if (scene->scene_size + DATA_BLOCK_SIZE > LP_SCENE_MAX_SIZE) {
      if (0) debug_printf("%s: failed\n", __func__);
      scene->alloc_failed = true;
      return NULL;
   } else {
      struct data_block *block = MALLOC_STRUCT(data_block);
      if (!block)
         return NULL;

      scene->scene_size += sizeof *block;

      block->used = 0;
      block->next = scene->data.head;
      scene->data.head = block;

      return block;
   }
}
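
/*
 * These data blocks back lp_scene_alloc() (declared in lp_scene.h):
 * allocations are bumped out of the current head block, and a new
 * DATA_BLOCK_SIZE block is chained in via the function above once it runs
 * out of room, until the scene as a whole reaches LP_SCENE_MAX_SIZE.
 * A rough sketch of the bump-allocation idea, assuming the block's payload
 * array is named 'data':
 *
 *    if (block->used + size > DATA_BLOCK_SIZE)
 *       block = lp_scene_new_data_block(scene);   // may set alloc_failed
 *    ptr = block->data + block->used;
 *    block->used += size;
 */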


/**
 * Return number of bytes used for all bin data within a scene.
 * This does not include resources (textures) referenced by the scene.
 */
static unsigned
lp_scene_data_size(const struct lp_scene *scene)
{
   unsigned size = 0;
   const struct data_block *block;
   for (block = scene->data.head; block; block = block->next) {
      size += block->used;
   }
   return size;
}


/**
 * Add a reference to a resource by the scene.
 * Returns true if the reference was added (or already present) and the
 * scene is still within its resource-size limit; returns false if the
 * reference could not be recorded or the scene should be flushed first.
 */
bool
lp_scene_add_resource_reference(struct lp_scene *scene,
                                struct pipe_resource *resource,
                                bool initializing_scene,
                                bool writeable)
{
   struct resource_ref *ref;
   int i;
   struct resource_ref **list = writeable ? &scene->writeable_resources
                                          : &scene->resources;
   struct resource_ref **last = list;

   mtx_lock(&scene->mutex);

   /* Look at existing resource blocks:
    */
   for (ref = *list; ref; ref = ref->next) {
      last = &ref->next;

      /* Search for this resource:
       */
      for (i = 0; i < ref->count; i++)
         if (ref->resource[i] == resource) {
            mtx_unlock(&scene->mutex);
            return true;
         }

      if (ref->count < RESOURCE_REF_SZ) {
         /* The block still has room, so append the reference here.
          */
         break;
      }
   }

   /* Create a new block if no block with free space was found.
    */
   if (!ref) {
      assert(*last == NULL);
      *last = lp_scene_alloc(scene, sizeof *ref);
      if (*last == NULL) {
         mtx_unlock(&scene->mutex);
         return false;
      }

      ref = *last;
      memset(ref, 0, sizeof *ref);
   }

   /* Map the resource again to increment its map count.  We likely use the
    * already-mapped pointer in a texture of the jit context, and that pointer
    * needs to stay mapped during rasterization.  This map is unmapped when
    * finalizing scene rasterization. */
   llvmpipe_resource_map(resource, 0, 0, LP_TEX_USAGE_READ);

   /* Append the reference to the reference block.
    */
   pipe_resource_reference(&ref->resource[ref->count++], resource);
   scene->resource_reference_size += llvmpipe_resource_size(resource);

   /* Heuristic to advise scene flushes.  This isn't helpful in the
    * initial setup of the scene, but after that point return false (so
    * the caller flushes) once the scene exceeds 64MB of referenced
    * texture data.
    */
   bool ok = initializing_scene ||
             scene->resource_reference_size < LP_SCENE_MAX_RESOURCE_SIZE;
   mtx_unlock(&scene->mutex);
   return ok;
}
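
/*
 * Caller-side sketch (hypothetical; the real callers live in the setup
 * code): a false return means the reference couldn't be recorded or the
 * 64MB heuristic tripped, so the current scene should be flushed.
 *
 *    if (!lp_scene_add_resource_reference(scene, res, false, false))
 *       flush_current_scene();               // hypothetical helper
 */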

/**
 * Add a reference to a fragment shader variant.
 * Returns false if out of memory, true otherwise.
 */
bool
lp_scene_add_frag_shader_reference(struct lp_scene *scene,
                                   struct lp_fragment_shader_variant *variant)
{
   struct shader_ref *ref, **last = &scene->frag_shaders;

   /* Look at existing shader reference blocks:
    */
   for (ref = scene->frag_shaders; ref; ref = ref->next) {
      last = &ref->next;

      /* Search for this variant:
       */
      for (int i = 0; i < ref->count; i++)
         if (ref->variant[i] == variant)
            return true;

      if (ref->count < SHADER_REF_SZ) {
         /* The block still has room, so append the reference here.
          */
         break;
      }
   }

   /* Create a new block if no block with free space was found.
    */
   if (!ref) {
      assert(*last == NULL);
      *last = lp_scene_alloc(scene, sizeof *ref);
      if (*last == NULL)
         return false;

      ref = *last;
      memset(ref, 0, sizeof *ref);
   }

   /* Append the reference to the reference block.
    */
   lp_fs_variant_reference(llvmpipe_context(scene->pipe),
                           &ref->variant[ref->count++], variant);

   return true;
}


/**
 * Does this scene have a reference to the given resource?
 * Returns bitmask of LP_REFERENCED_FOR_READ/WRITE bits.
 */
unsigned
lp_scene_is_resource_referenced(const struct lp_scene *scene,
                                const struct pipe_resource *resource)
{
   const struct resource_ref *ref;

   /* check the render targets */
   for (unsigned j = 0; j < scene->fb.nr_cbufs; j++) {
      if (scene->fb.cbufs[j] && scene->fb.cbufs[j]->texture == resource)
         return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }
   if (scene->fb.zsbuf && scene->fb.zsbuf->texture == resource) {
      return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   for (ref = scene->resources; ref; ref = ref->next) {
      for (int i = 0; i < ref->count; i++)
         if (ref->resource[i] == resource)
            return LP_REFERENCED_FOR_READ;
   }

   for (ref = scene->writeable_resources; ref; ref = ref->next) {
      for (int i = 0; i < ref->count; i++)
         if (ref->resource[i] == resource)
            return LP_REFERENCED_FOR_READ | LP_REFERENCED_FOR_WRITE;
   }

   return 0;
}


/** advance curr_x,y to the next bin */
static bool
next_bin(struct lp_scene *scene)
{
   scene->curr_x++;
   if (scene->curr_x >= scene->tiles_x) {
      scene->curr_x = 0;
      scene->curr_y++;
   }
   if (scene->curr_y >= scene->tiles_y) {
      /* no more bins */
      return false;
   }
   return true;
}


void
lp_scene_bin_iter_begin(struct lp_scene *scene)
{
   scene->curr_x = scene->curr_y = -1;
}


/**
 * Return pointer to next bin to be rendered.
 * The lp_scene::curr_x and ::curr_y fields will be advanced.
 * Multiple rendering threads will call this function to get a chunk
 * of work (a bin) to work on.
 */
struct cmd_bin *
lp_scene_bin_iter_next(struct lp_scene *scene, int *x, int *y)
{
   struct cmd_bin *bin = NULL;

   mtx_lock(&scene->mutex);

   if (scene->curr_x < 0) {
      /* first bin */
      scene->curr_x = 0;
      scene->curr_y = 0;
   } else if (!next_bin(scene)) {
      /* no more bins left */
      goto end;
   }

   bin = lp_scene_get_bin(scene, scene->curr_x, scene->curr_y);
   *x = scene->curr_x;
   *y = scene->curr_y;

end:
   /*printf("return bin %p at %d, %d\n", (void *) bin, *x, *y);*/
   mtx_unlock(&scene->mutex);
   return bin;
}
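
/*
 * Worker-thread sketch (hypothetical loop; the real one lives in the
 * rasterizer): each thread repeatedly grabs the next bin, protected by the
 * scene mutex, until the iterator runs out.
 *
 *    int x, y;
 *    struct cmd_bin *bin;
 *    while ((bin = lp_scene_bin_iter_next(scene, &x, &y)) != NULL)
 *       rasterize_bin(bin, x, y);            // hypothetical helper
 */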


void
lp_scene_begin_binning(struct lp_scene *scene,
                       struct pipe_framebuffer_state *fb)
{
   assert(lp_scene_is_empty(scene));

   util_copy_framebuffer_state(&scene->fb, fb);

   scene->tiles_x = align(fb->width, TILE_SIZE) / TILE_SIZE;
   scene->tiles_y = align(fb->height, TILE_SIZE) / TILE_SIZE;
   assert(scene->tiles_x <= TILES_X);
   assert(scene->tiles_y <= TILES_Y);

   unsigned num_required_tiles = scene->tiles_x * scene->tiles_y;
   if (scene->num_alloced_tiles < num_required_tiles) {
      scene->tiles = reallocarray(scene->tiles, num_required_tiles,
                                  sizeof(struct cmd_bin));
      if (!scene->tiles)
         return;
      memset(scene->tiles, 0, sizeof(struct cmd_bin) * num_required_tiles);
      scene->num_alloced_tiles = num_required_tiles;
   }
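
   /*
    * Example of the bin math above: with the usual 64x64 tiles (TILE_SIZE),
    * a 1920x1080 framebuffer needs align(1920, 64) / 64 = 30 by
    * align(1080, 64) / 64 = 17 bins, i.e. 510 cmd_bin slots in scene->tiles.
    */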

   /*
    * Determine how many layers the fb has (used for clamping the layer
    * value).  OpenGL (but not d3d10) permits a different number of layers
    * per render target; however, results are undefined if the layer index
    * exceeds the layer count of ANY attachment, so we don't need separate
    * per-cbuf and zsbuf maxima.
    */
   unsigned max_layer = ~0;
   for (unsigned i = 0; i < scene->fb.nr_cbufs; i++) {
      struct pipe_surface *cbuf = scene->fb.cbufs[i];
      if (cbuf) {
         if (llvmpipe_resource_is_texture(cbuf->texture)) {
            max_layer = MIN2(max_layer,
                             cbuf->u.tex.last_layer - cbuf->u.tex.first_layer);
         } else {
            max_layer = 0;
         }
      }
   }

   if (fb->zsbuf) {
      struct pipe_surface *zsbuf = scene->fb.zsbuf;
      max_layer = MIN2(max_layer, zsbuf->u.tex.last_layer - zsbuf->u.tex.first_layer);
   }

   scene->fb_max_layer = max_layer;
   scene->fb_max_samples = util_framebuffer_get_num_samples(fb);
   if (scene->fb_max_samples == 4) {
      for (unsigned i = 0; i < 4; i++) {
         scene->fixed_sample_pos[i][0] = util_iround(lp_sample_pos_4x[i][0] * FIXED_ONE);
         scene->fixed_sample_pos[i][1] = util_iround(lp_sample_pos_4x[i][1] * FIXED_ONE);
      }
   }
}


void
lp_scene_end_binning(struct lp_scene *scene)
{
   if (LP_DEBUG & DEBUG_SCENE) {
      debug_printf("rasterize scene:\n");
      debug_printf("  scene_size: %u\n",
                   scene->scene_size);
      debug_printf("  data size: %u\n",
                   lp_scene_data_size(scene));

      if (0)
         lp_debug_bins(scene);
   }
}