/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"
/* All the generic state handling. Even for CSOs that are specific to
 * a GPU generation, the bind and delete callbacks can go in here when
 * they are common across generations.
 */

static void
fd_set_blend_color(struct pipe_context *pctx,
		const struct pipe_blend_color *blend_color)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend_color = *blend_color;
	ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
		const struct pipe_stencil_ref *stencil_ref)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stencil_ref = *stencil_ref;
	ctx->dirty |= FD_DIRTY_STENCIL_REF;
}

static void
fd_set_clip_state(struct pipe_context *pctx,
		const struct pipe_clip_state *clip)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->ucp = *clip;
	ctx->dirty |= FD_DIRTY_UCP;
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx,
		enum pipe_shader_type shader, uint index,
		const struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	util_copy_constant_buffer(&so->cb[index], cb);

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		so->dirty_mask &= ~(1 << index);
		return;
	}

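	/* otherwise mark the slot as enabled, and flag its contents (plus
	 * the corresponding shader stage's const state) as dirty:
	 */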
	so->enabled_mask |= 1 << index;
	so->dirty_mask |= 1 << index;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;
	ctx->dirty |= FD_DIRTY_CONST;
}

static void
fd_set_shader_buffers(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_shader_buffer *buffers)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
	unsigned mask = 0;

	if (buffers) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

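			/* skip slots where the new binding matches what is
			 * already bound:
			 */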
			if ((buf->buffer == buffers[i].buffer) &&
					(buf->buffer_offset == buffers[i].buffer_offset) &&
					(buf->buffer_size == buffers[i].buffer_size))
				continue;

			mask |= BIT(n);

			buf->buffer_offset = buffers[i].buffer_offset;
			buf->buffer_size = buffers[i].buffer_size;
			pipe_resource_reference(&buf->buffer, buffers[i].buffer);

			if (buf->buffer)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
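		/* buffers == NULL unbinds the entire [start, start + count)
		 * range:
		 */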
		mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_shader_buffer *buf = &so->sb[n];

			pipe_resource_reference(&buf->buffer, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	so->dirty_mask |= mask;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
}

static void
fd_set_shader_images(struct pipe_context *pctx,
		enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_image_view *images)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

	unsigned mask = 0;

	if (images) {
		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *buf = &so->si[n];

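			/* skip image slots whose binding is unchanged: */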
			if ((buf->resource == images[i].resource) &&
					(buf->format == images[i].format) &&
					(buf->access == images[i].access) &&
					!memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
				continue;

			mask |= BIT(n);
			util_copy_image_view(buf, &images[i]);

			if (buf->resource)
				so->enabled_mask |= BIT(n);
			else
				so->enabled_mask &= ~BIT(n);
		}
	} else {
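		/* images == NULL unbinds the entire range: */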
		mask = (BIT(count) - 1) << start;

		for (unsigned i = 0; i < count; i++) {
			unsigned n = i + start;
			struct pipe_image_view *img = &so->si[n];

			pipe_resource_reference(&img->resource, NULL);
		}

		so->enabled_mask &= ~mask;
	}

	so->dirty_mask |= mask;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_IMAGE;
}

static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso;

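	/* with batch reordering enabled, changing the render target just
	 * switches the context over to the batch for the new framebuffer,
	 * rather than forcing a flush of the current one:
	 */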
	if (ctx->screen->reorder) {
		struct fd_batch *batch, *old_batch = NULL;

		fd_batch_reference(&old_batch, ctx->batch);

		if (likely(old_batch))
			fd_batch_set_stage(old_batch, FD_STAGE_NULL);

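		/* look up (or create) the batch for the new framebuffer in
		 * the batch cache and make it current:
		 */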
		batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
		fd_batch_reference(&ctx->batch, NULL);
		fd_reset_wfi(batch);
		ctx->batch = batch;
		fd_context_all_dirty(ctx);

		if (old_batch && old_batch->blit && !old_batch->back_blit) {
			/* for blits, there is not really much point in hanging on
			 * to the uncommitted batch (ie. you probably don't blit
			 * multiple times to the same surface), so we might as
			 * well go ahead and flush this one:
			 */
			fd_batch_flush(old_batch, false, false);
		}

		fd_batch_reference(&old_batch, NULL);
	} else {
		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
				framebuffer->cbufs[0], framebuffer->zsbuf);
		fd_batch_flush(ctx->batch, false, false);
	}

	cso = &ctx->batch->framebuffer;

	util_copy_framebuffer_state(cso, framebuffer);

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

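	/* the disabled-scissor rect always covers the full framebuffer, so
	 * it must be refreshed (and scissor state re-emitted) whenever the
	 * framebuffer dimensions change:
	 */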
	ctx->disabled_scissor.minx = 0;
	ctx->disabled_scissor.miny = 0;
	ctx->disabled_scissor.maxx = cso->width;
	ctx->disabled_scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
		const struct pipe_poly_stipple *stipple)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stipple = *stipple;
	ctx->dirty |= FD_DIRTY_STIPPLE;
}

static void
fd_set_scissor_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_scissors,
		const struct pipe_scissor_state *scissor)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->scissor = *scissor;
	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_viewport_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_viewports,
		const struct pipe_viewport_state *viewport)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->viewport = *viewport;
	ctx->dirty |= FD_DIRTY_VIEWPORT;
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	if (ctx->screen->gpu_id < 300) {
		for (i = 0; i < count; i++) {
			bool new_enabled = vb && vb[i].buffer.resource;
			bool old_enabled = so->vb[i].buffer.resource != NULL;
			uint32_t new_stride = vb ? vb[i].stride : 0;
			uint32_t old_stride = so->vb[i].stride;
			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
				ctx->dirty |= FD_DIRTY_VTXSTATE;
				break;
			}
		}
	}

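	/* the util helper handles the refcounting and enabled-mask
	 * bookkeeping for the updated slots:
	 */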
	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	ctx->dirty |= FD_DIRTY_VTXBUF;
}

static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blend_state *cso = hwcso;
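	/* transitions to/from dual-source blending get their own dirty
	 * bit, so detect whether the dual-ness changed:
	 */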
	bool old_is_dual = ctx->blend ?
		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
		false;
	bool new_is_dual = cso ?
		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
		false;
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
	if (old_is_dual != new_is_dual)
		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);

	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;

	/* if the scissor-enable bit changed, we also need to mark the
	 * scissor state as dirty.
	 * NOTE: a shallow pointer compare suffices, since we only care
	 * whether it changed to/from &ctx->disabled_scissor
	 */
	if (old_scissor != fd_context_get_scissor(ctx))
		ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
		const struct pipe_vertex_element *elements)
{
	struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

	if (!so)
		return NULL;

	memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
	so->num_elements = num_elements;

	return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}

static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
	struct pipe_stream_output_target *target;
	struct fd_resource *rsc = fd_resource(prsc);

	target = CALLOC_STRUCT(pipe_stream_output_target);
	if (!target)
		return NULL;

	pipe_reference_init(&target->reference, 1);
	pipe_resource_reference(&target->buffer, prsc);

	target->context = pctx;
	target->buffer_offset = buffer_offset;
	target->buffer_size = buffer_size;

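	/* streamout will write into this range of the buffer, so mark it
	 * as containing valid data:
	 */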
	assert(rsc->base.target == PIPE_BUFFER);
	util_range_add(&rsc->valid_buffer_range,
		buffer_offset, buffer_offset + buffer_size);

	return target;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
		struct pipe_stream_output_target *target)
{
	pipe_resource_reference(&target->buffer, NULL);
	FREE(target);
}

static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

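	/* an offset of (unsigned)-1 means "append", ie. continue from
	 * wherever the target previously left off:
	 */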
	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
		boolean append = (offsets[i] == (unsigned)-1);

		if (!changed && append)
			continue;

		if (!append)
			so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

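	/* unbind any remaining targets left over from the previous set: */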
	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->compute = state;
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

static void
fd_set_compute_resources(struct pipe_context *pctx,
		unsigned start, unsigned count, struct pipe_surface **prscs)
{
	// TODO
}

static void
fd_set_global_binding(struct pipe_context *pctx,
		unsigned first, unsigned count, struct pipe_resource **prscs,
		uint32_t **handles)
{
	/* TODO only used by clover.. seems to need us to return the actual
	 * gpuaddr of the buffer.. which isn't really exposed to mesa atm.
	 * How is this used?
	 */
}

void
fd_state_init(struct pipe_context *pctx)
{
	pctx->set_blend_color = fd_set_blend_color;
	pctx->set_stencil_ref = fd_set_stencil_ref;
	pctx->set_clip_state = fd_set_clip_state;
	pctx->set_sample_mask = fd_set_sample_mask;
	pctx->set_constant_buffer = fd_set_constant_buffer;
	pctx->set_shader_buffers = fd_set_shader_buffers;
	pctx->set_shader_images = fd_set_shader_images;
	pctx->set_framebuffer_state = fd_set_framebuffer_state;
	pctx->set_polygon_stipple = fd_set_polygon_stipple;
	pctx->set_scissor_states = fd_set_scissor_states;
	pctx->set_viewport_states = fd_set_viewport_states;

	pctx->set_vertex_buffers = fd_set_vertex_buffers;

	pctx->bind_blend_state = fd_blend_state_bind;
	pctx->delete_blend_state = fd_blend_state_delete;

	pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
	pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

	pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
	pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

	pctx->create_vertex_elements_state = fd_vertex_state_create;
	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
	pctx->bind_vertex_elements_state = fd_vertex_state_bind;

	pctx->create_stream_output_target = fd_create_stream_output_target;
	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
	pctx->set_stream_output_targets = fd_set_stream_output_targets;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->bind_compute_state = fd_bind_compute_state;
		pctx->set_compute_resources = fd_set_compute_resources;
		pctx->set_global_binding = fd_set_global_binding;
	}
}