/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"

#include "freedreno_context.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"

/* All the generic state handling.  For CSOs that are specific to the GPU
 * version, the bind and delete hooks can still live here when they are
 * common across generations.
 */

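/* Update ctx->draw_cost, a rough estimate of the cost of each draw (one
 * unit per bound color buffer, plus one for each enabled blend, depth-test
 * and depth-write), used when weighing GMEM vs sysmem rendering:
 */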
static void
update_draw_cost(struct fd_context *ctx) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

   ctx->draw_cost = pfb->nr_cbufs;
   for (unsigned i = 0; i < pfb->nr_cbufs; i++)
      if (fd_blend_enabled(ctx, i))
         ctx->draw_cost++;
   if (fd_depth_enabled(ctx))
      ctx->draw_cost++;
   if (fd_depth_write_enabled(ctx))
      ctx->draw_cost++;
}

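/* The simple "param" state below is just copied into the context, with the
 * corresponding dirty bit set so it gets re-emitted at the next draw:
 */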
static void
fd_set_blend_color(struct pipe_context *pctx,
                   const struct pipe_blend_color *blend_color) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->blend_color = *blend_color;
   fd_context_dirty(ctx, FD_DIRTY_BLEND_COLOR);
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
                   const struct pipe_stencil_ref stencil_ref) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stencil_ref = stencil_ref;
   fd_context_dirty(ctx, FD_DIRTY_STENCIL_REF);
}

static void
fd_set_clip_state(struct pipe_context *pctx,
                  const struct pipe_clip_state *clip) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->ucp = *clip;
   fd_context_dirty(ctx, FD_DIRTY_UCP);
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->sample_mask = (uint16_t)sample_mask;
   fd_context_dirty(ctx, FD_DIRTY_SAMPLE_MASK);
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->min_samples = min_samples;
   fd_context_dirty(ctx, FD_DIRTY_MIN_SAMPLES);
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx, enum pipe_shader_type shader,
                       uint index, bool take_ownership,
                       const struct pipe_constant_buffer *cb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

   util_copy_constant_buffer(&so->cb[index], cb, take_ownership);

   /* Note that gallium frontends can unbind constant buffers by
    * passing NULL here.
    */
   if (unlikely(!cb)) {
      so->enabled_mask &= ~(1 << index);
      return;
   }

   so->enabled_mask |= 1 << index;

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_CONST);
   fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);

   if (index > 0) {
      assert(!cb->user_buffer);
      ctx->dirty |= FD_DIRTY_RESOURCE;
   }
}

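/* Bind a range of SSBO slots.  For writable bindings the bound range is
 * added to the buffer's valid range, since the GPU may write it:
 */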
static void
fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers,
                      unsigned writable_bitmask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
   const unsigned modified_bits = u_bit_consecutive(start, count);

   so->enabled_mask &= ~modified_bits;
   so->writable_mask &= ~modified_bits;
   so->writable_mask |= writable_bitmask << start;

   for (unsigned i = 0; i < count; i++) {
      unsigned n = i + start;
      struct pipe_shader_buffer *buf = &so->sb[n];

      if (buffers && buffers[i].buffer) {
         if ((buf->buffer == buffers[i].buffer) &&
             (buf->buffer_offset == buffers[i].buffer_offset) &&
             (buf->buffer_size == buffers[i].buffer_size))
            continue;

         buf->buffer_offset = buffers[i].buffer_offset;
         buf->buffer_size = buffers[i].buffer_size;
         pipe_resource_reference(&buf->buffer, buffers[i].buffer);

         fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);

         so->enabled_mask |= BIT(n);

         if (writable_bitmask & BIT(i)) {
            struct fd_resource *rsc = fd_resource(buf->buffer);
            util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                           buf->buffer_offset,
                           buf->buffer_offset + buf->buffer_size);
         }
      } else {
         pipe_resource_reference(&buf->buffer, NULL);
      }
   }

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_SSBO);
}

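/* Bind a range of image slots (plus unbind any trailing slots the frontend
 * asks to be cleared).  As with SSBOs, writable buffer images get their
 * bound range added to the resource's valid range:
 */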
void
fd_set_shader_images(struct pipe_context *pctx, enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     unsigned unbind_num_trailing_slots,
                     const struct pipe_image_view *images) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

   unsigned mask = 0;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *buf = &so->si[n];

         if ((buf->resource == images[i].resource) &&
             (buf->format == images[i].format) &&
             (buf->access == images[i].access) &&
             !memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
            continue;

         mask |= BIT(n);
         util_copy_image_view(buf, &images[i]);

         if (buf->resource) {
            fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
            so->enabled_mask |= BIT(n);

            if ((buf->access & PIPE_IMAGE_ACCESS_WRITE) &&
                (buf->resource->target == PIPE_BUFFER)) {

               struct fd_resource *rsc = fd_resource(buf->resource);
               util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                              buf->u.buf.offset,
                              buf->u.buf.offset + buf->u.buf.size);
            }
         } else {
            so->enabled_mask &= ~BIT(n);
         }
      }
   } else {
      mask = (BIT(count) - 1) << start;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *img = &so->si[n];

         pipe_resource_reference(&img->resource, NULL);
      }

      so->enabled_mask &= ~mask;
   }

   for (unsigned i = 0; i < unbind_num_trailing_slots; i++)
      pipe_resource_reference(&so->si[i + start + count].resource, NULL);

   so->enabled_mask &=
      ~(BITFIELD_MASK(unbind_num_trailing_slots) << (start + count));

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_IMAGE);
}

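/* A framebuffer change is the natural batch boundary.  With reordering
 * enabled we just drop the reference to the current batch (a new one is
 * created on demand at the next draw), otherwise flush immediately:
 */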
void
fd_set_framebuffer_state(struct pipe_context *pctx,
                         const struct pipe_framebuffer_state *framebuffer)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *cso;

   DBG("%ux%u, %u layers, %u samples", framebuffer->width, framebuffer->height,
       framebuffer->layers, framebuffer->samples);

   cso = &ctx->framebuffer;

   if (util_framebuffer_state_equal(cso, framebuffer))
      return;

   /* Do this *after* checking that the framebuffer state is actually
    * changing.  In the fd_blitter_clear() path, we get a pfb update
    * to restore the current pfb state, which should not trigger us
    * to flush (as that can cause the batch to be freed at a point
    * before fd_clear() returns, but after the point where it expects
    * flushes to potentially happen).
    */
   fd_context_switch_from(ctx);

   util_copy_framebuffer_state(cso, framebuffer);

   cso->samples = util_framebuffer_get_num_samples(cso);

   if (ctx->screen->reorder) {
      struct fd_batch *old_batch = NULL;

      fd_batch_reference(&old_batch, ctx->batch);

      if (likely(old_batch))
         fd_batch_finish_queries(old_batch);

      fd_batch_reference(&ctx->batch, NULL);
      fd_context_all_dirty(ctx);
      ctx->update_active_queries = true;

      fd_batch_reference(&old_batch, NULL);
   } else if (ctx->batch) {
      DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
          framebuffer->cbufs[0], framebuffer->zsbuf);
      fd_batch_flush(ctx->batch);
   }

   fd_context_dirty(ctx, FD_DIRTY_FRAMEBUFFER);

   ctx->disabled_scissor.minx = 0;
   ctx->disabled_scissor.miny = 0;
   ctx->disabled_scissor.maxx = cso->width;
   ctx->disabled_scissor.maxy = cso->height;

   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
   update_draw_cost(ctx);
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
                       const struct pipe_poly_stipple *stipple) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stipple = *stipple;
   fd_context_dirty(ctx, FD_DIRTY_STIPPLE);
}

static void
fd_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                      unsigned num_scissors,
                      const struct pipe_scissor_state *scissor) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->scissor = *scissor;
   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
}

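/* In addition to storing the viewport, derive the screen-space scissor
 * rect implied by the viewport transform, so draws can be bounded to it:
 */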
static void
fd_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewport) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
   float minx, miny, maxx, maxy;

   ctx->viewport = *viewport;

   /* see si_get_scissor_from_viewport(): */

   /* Convert (-1, -1) and (1, 1) from clip space into window space. */
   minx = -viewport->scale[0] + viewport->translate[0];
   miny = -viewport->scale[1] + viewport->translate[1];
   maxx = viewport->scale[0] + viewport->translate[0];
   maxy = viewport->scale[1] + viewport->translate[1];

   /* Handle inverted viewports. */
   if (minx > maxx) {
      swap(minx, maxx);
   }
   if (miny > maxy) {
      swap(miny, maxy);
   }

   const float max_dims = ctx->screen->gen >= 4 ? 16384.f : 4096.f;

   /* Clamp, convert to integer and round up the max bounds. */
   scissor->minx = CLAMP(minx, 0.f, max_dims);
   scissor->miny = CLAMP(miny, 0.f, max_dims);
   scissor->maxx = CLAMP(ceilf(maxx), 0.f, max_dims);
   scissor->maxy = CLAMP(ceilf(maxy), 0.f, max_dims);

   fd_context_dirty(ctx, FD_DIRTY_VIEWPORT);
}

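/* Bind/unbind a range of vertex-buffer slots, tagging each bound resource
 * with its usage so the relevant state can be re-dirtied if the resource
 * is later rewritten or shadowed:
 */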
static void
fd_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
                      unsigned count, unsigned unbind_num_trailing_slots,
                      bool take_ownership,
                      const struct pipe_vertex_buffer *vb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
   int i;

   /* on a2xx, pitch is encoded in the vtx fetch instruction, so
    * we need to mark VTXSTATE as dirty as well to trigger patching
    * and re-emitting the vtx shader:
    */
   if (ctx->screen->gen < 3) {
      for (i = 0; i < count; i++) {
         bool new_enabled = vb && vb[i].buffer.resource;
         bool old_enabled = so->vb[i].buffer.resource != NULL;
         uint32_t new_stride = vb ? vb[i].stride : 0;
         uint32_t old_stride = so->vb[i].stride;
         if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
            fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
            break;
         }
      }
   }

   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot,
                                count, unbind_num_trailing_slots,
                                take_ownership);
   so->count = util_last_bit(so->enabled_mask);

   if (!vb)
      return;

   fd_context_dirty(ctx, FD_DIRTY_VTXBUF);

   for (unsigned i = 0; i < count; i++) {
      assert(!vb[i].is_user_buffer);
      fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);
   }
}

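/* Binding a new blend CSO can flip dual-source blending on or off; that
 * transition gets its own dirty bit so backends can react to it:
 */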
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_blend_state *cso = hwcso;
   bool old_is_dual = ctx->blend ? ctx->blend->rt[0].blend_enable &&
                                      util_blend_state_is_dual(ctx->blend, 0)
                                 : false;
   bool new_is_dual =
      cso ? cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) : false;
   ctx->blend = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_BLEND);
   if (old_is_dual != new_is_dual)
      fd_context_dirty(ctx, FD_DIRTY_BLEND_DUAL);
   update_draw_cost(ctx);
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

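/* The rasterizer CSO determines whether the user scissor or the "disabled"
 * full-framebuffer scissor rect is in effect, so changing it may require
 * re-emitting scissor state too:
 */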
static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
   bool discard = ctx->rasterizer && ctx->rasterizer->rasterizer_discard;

   ctx->rasterizer = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);

   if (ctx->rasterizer && ctx->rasterizer->scissor) {
      ctx->current_scissor = &ctx->scissor;
   } else {
      ctx->current_scissor = &ctx->disabled_scissor;
   }

   /* if the scissor enable bit changed, we need to mark scissor
    * state as dirty as well:
    * NOTE: we can do a shallow compare, since we only care
    * whether it changed to/from &ctx->disabled_scissor
    */
   if (old_scissor != fd_context_get_scissor(ctx))
      fd_context_dirty(ctx, FD_DIRTY_SCISSOR);

   if (ctx->rasterizer && (discard != ctx->rasterizer->rasterizer_discard))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_DISCARD);
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->zsa = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_ZSA);
   update_draw_cost(ctx);
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

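/* Generic vertex-elements CSO, used unless the per-gen backend has already
 * installed its own create hook (see fd_state_init()):
 */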
static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                       const struct pipe_vertex_element *elements)
{
   struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

   if (!so)
      return NULL;

   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;

   return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->vtx.vtx = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
}

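/* Each stream-output target also gets a small single-uint32 offset_buf BO,
 * which the per-gen backends use to track the current write offset into
 * the SO buffer:
 */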
static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
                               struct pipe_resource *prsc,
                               unsigned buffer_offset, unsigned buffer_size)
{
   struct fd_stream_output_target *target;
   struct fd_resource *rsc = fd_resource(prsc);

   target = CALLOC_STRUCT(fd_stream_output_target);
   if (!target)
      return NULL;

   pipe_reference_init(&target->base.reference, 1);
   pipe_resource_reference(&target->base.buffer, prsc);

   target->base.context = pctx;
   target->base.buffer_offset = buffer_offset;
   target->base.buffer_size = buffer_size;

   target->offset_buf = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(uint32_t));

   assert(rsc->b.b.target == PIPE_BUFFER);
   util_range_add(&rsc->b.b, &rsc->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   return &target->base;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
                                struct pipe_stream_output_target *target)
{
   struct fd_stream_output_target *cso = fd_stream_output_target(target);

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset_buf, NULL);

   FREE(target);
}

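/* Bind the SO targets.  Per gallium semantics, an offset of -1 appends to
 * wherever the previous transform-feedback pass left off, while any other
 * value resets the target's write offset:
 */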
static void
fd_set_stream_output_targets(struct pipe_context *pctx, unsigned num_targets,
                             struct pipe_stream_output_target **targets,
                             const unsigned *offsets) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_streamout_stateobj *so = &ctx->streamout;
   unsigned i;

   assert(num_targets <= ARRAY_SIZE(so->targets));

   /* Older gens need sw stats enabled for streamout emulation in VS: */
   if (ctx->screen->gen < 5) {
      if (num_targets && !so->num_targets) {
         ctx->stats_users++;
      } else if (so->num_targets && !num_targets) {
         ctx->stats_users--;
      }
   }

   for (i = 0; i < num_targets; i++) {
      bool changed = targets[i] != so->targets[i];
      bool reset = (offsets[i] != (unsigned)-1);

      so->reset |= (reset << i);

      if (!changed && !reset)
         continue;

      /* Note that all SO targets will be reset at once at a
       * BeginTransformFeedback().
       */
      if (reset) {
         so->offsets[i] = offsets[i];
         ctx->streamout.verts_written = 0;
      }

      pipe_so_target_reference(&so->targets[i], targets[i]);
   }

   for (; i < so->num_targets; i++) {
      pipe_so_target_reference(&so->targets[i], NULL);
   }

   so->num_targets = num_targets;

   fd_context_dirty(ctx, FD_DIRTY_STREAMOUT);
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->compute = state;
   /* NOTE: Don't mark FD_DIRTY_PROG for compute specific state */
   ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}

static void
fd_set_compute_resources(struct pipe_context *pctx, unsigned start,
                         unsigned count, struct pipe_surface **prscs) in_dt
{
   // TODO
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx, unsigned first, unsigned count,
                      struct pipe_resource **prscs, uint32_t **handles) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
   unsigned mask = 0;

   if (prscs) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;

         mask |= BIT(n);

         pipe_resource_reference(&so->buf[n], prscs[i]);

         if (so->buf[n]) {
            struct fd_resource *rsc = fd_resource(so->buf[n]);
            uint64_t iova = fd_bo_get_iova(rsc->bo);
            // TODO need to scream if iova > 32b or fix gallium API..
            *handles[i] += iova;
         }

         if (prscs[i])
            so->enabled_mask |= BIT(n);
         else
            so->enabled_mask &= ~BIT(n);
      }
   } else {
      mask = (BIT(count) - 1) << first;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;
         pipe_resource_reference(&so->buf[n], NULL);
      }

      so->enabled_mask &= ~mask;
   }
}

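/* Hook up the generic state handlers.  Per-gen backends can override
 * individual entrypoints after (or, for vertex-elements create, before)
 * calling this:
 */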
void
fd_state_init(struct pipe_context *pctx)
{
   pctx->set_blend_color = fd_set_blend_color;
   pctx->set_stencil_ref = fd_set_stencil_ref;
   pctx->set_clip_state = fd_set_clip_state;
   pctx->set_sample_mask = fd_set_sample_mask;
   pctx->set_min_samples = fd_set_min_samples;
   pctx->set_constant_buffer = fd_set_constant_buffer;
   pctx->set_shader_buffers = fd_set_shader_buffers;
   pctx->set_shader_images = fd_set_shader_images;
   pctx->set_framebuffer_state = fd_set_framebuffer_state;
   pctx->set_polygon_stipple = fd_set_polygon_stipple;
   pctx->set_scissor_states = fd_set_scissor_states;
   pctx->set_viewport_states = fd_set_viewport_states;

   pctx->set_vertex_buffers = fd_set_vertex_buffers;

   pctx->bind_blend_state = fd_blend_state_bind;
   pctx->delete_blend_state = fd_blend_state_delete;

   pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
   pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

   pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
   pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

   if (!pctx->create_vertex_elements_state)
      pctx->create_vertex_elements_state = fd_vertex_state_create;
   pctx->delete_vertex_elements_state = fd_vertex_state_delete;
   pctx->bind_vertex_elements_state = fd_vertex_state_bind;

   pctx->create_stream_output_target = fd_create_stream_output_target;
   pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
   pctx->set_stream_output_targets = fd_set_stream_output_targets;

   if (has_compute(fd_screen(pctx->screen))) {
      pctx->bind_compute_state = fd_bind_compute_state;
      pctx->set_compute_resources = fd_set_compute_resources;
      pctx->set_global_binding = fd_set_global_binding;
   }
}