/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"
#include "util/u_upload_mgr.h"

#include "common/freedreno_guardband.h"

#include "freedreno_context.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"

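/* Helper to read a field from a possibly-NULL state object, e.g.
 * (illustrative):
 *
 *    bool discard = get_safe(ctx->rasterizer, rasterizer_discard);
 *
 * evaluates to ctx->rasterizer->rasterizer_discard when a rasterizer
 * CSO is bound, and to 0 otherwise.
 */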
#define get_safe(ptr, field) ((ptr) ? (ptr)->field : 0)

/* All the generic state handling.  For CSOs that are specific to the
 * GPU generation, the bind and the delete can go in here when they are
 * common across generations.
 */

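/* Estimate the relative cost of a draw: one unit per bound color
 * buffer, plus one for each blend-enabled MRT, plus one each for depth
 * test and depth write.
 */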
static void
update_draw_cost(struct fd_context *ctx) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

   ctx->draw_cost = pfb->nr_cbufs;
   for (unsigned i = 0; i < pfb->nr_cbufs; i++)
      if (fd_blend_enabled(ctx, i))
         ctx->draw_cost++;
   if (fd_depth_enabled(ctx))
      ctx->draw_cost++;
   if (fd_depth_write_enabled(ctx))
      ctx->draw_cost++;
}

static void
fd_set_blend_color(struct pipe_context *pctx,
                   const struct pipe_blend_color *blend_color) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->blend_color = *blend_color;
   fd_context_dirty(ctx, FD_DIRTY_BLEND_COLOR);
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
                   const struct pipe_stencil_ref stencil_ref) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stencil_ref = stencil_ref;
   fd_context_dirty(ctx, FD_DIRTY_STENCIL_REF);
}

static void
fd_set_clip_state(struct pipe_context *pctx,
                  const struct pipe_clip_state *clip) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->ucp = *clip;
   fd_context_dirty(ctx, FD_DIRTY_UCP);
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->sample_mask = (uint16_t)sample_mask;
   fd_context_dirty(ctx, FD_DIRTY_SAMPLE_MASK);
}

static void
fd_set_sample_locations(struct pipe_context *pctx, size_t size,
                        const uint8_t *locations)
   in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   if (!locations) {
      ctx->sample_locations_enabled = false;
      return;
   }

   size = MIN2(size, sizeof(ctx->sample_locations));
   memcpy(ctx->sample_locations, locations, size);
   ctx->sample_locations_enabled = true;

   fd_context_dirty(ctx, FD_DIRTY_SAMPLE_LOCATIONS);
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->min_samples = min_samples;
   fd_context_dirty(ctx, FD_DIRTY_MIN_SAMPLES);
}

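/* Copy a user-pointer constant buffer into GPU memory via the stream
 * uploader; on return, cb->buffer/buffer_offset reference the uploaded
 * copy and the user pointer is dropped.
 */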
static void
upload_user_buffer(struct pipe_context *pctx, struct pipe_constant_buffer *cb)
{
   u_upload_data(pctx->stream_uploader, 0, cb->buffer_size, 64,
                 cb->user_buffer, &cb->buffer_offset, &cb->buffer);
   cb->user_buffer = NULL;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx, enum pipe_shader_type shader,
                       uint index, bool take_ownership,
                       const struct pipe_constant_buffer *cb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

   util_copy_constant_buffer(&so->cb[index], cb, take_ownership);

   /* Note that gallium frontends can unbind constant buffers by
    * passing NULL here.
    */
   if (unlikely(!cb)) {
      so->enabled_mask &= ~(1 << index);
      return;
   }

   if (cb->user_buffer && ctx->screen->gen >= 6) {
      upload_user_buffer(pctx, &so->cb[index]);
      cb = &so->cb[index];
   }

   so->enabled_mask |= 1 << index;

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_CONST);
   fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);
   fd_dirty_shader_resource(ctx, cb->buffer, shader, FD_DIRTY_SHADER_CONST, false);
}

void
fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers,
                      unsigned writable_bitmask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
   const unsigned modified_bits = u_bit_consecutive(start, count);

   so->writable_mask &= ~modified_bits;
   so->writable_mask |= writable_bitmask << start;

   for (unsigned i = 0; i < count; i++) {
      unsigned n = i + start;
      struct pipe_shader_buffer *buf = &so->sb[n];

      if (buffers && buffers[i].buffer) {
         buf->buffer_offset = buffers[i].buffer_offset;
         buf->buffer_size = buffers[i].buffer_size;
         pipe_resource_reference(&buf->buffer, buffers[i].buffer);

         bool write = writable_bitmask & BIT(i);

         fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);
         fd_dirty_shader_resource(ctx, buffers[i].buffer, shader,
                                  FD_DIRTY_SHADER_SSBO, write);

         so->enabled_mask |= BIT(n);

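         /* For writable SSBOs, record the written range as valid so
          * later CPU access knows these contents are defined:
          */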
         if (write) {
            struct fd_resource *rsc = fd_resource(buf->buffer);
            util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                           buf->buffer_offset,
                           buf->buffer_offset + buf->buffer_size);
         }
      } else {
         pipe_resource_reference(&buf->buffer, NULL);

         so->enabled_mask &= ~BIT(n);
      }
   }

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_SSBO);
}

void
fd_set_shader_images(struct pipe_context *pctx, enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     unsigned unbind_num_trailing_slots,
                     const struct pipe_image_view *images) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

   unsigned mask = 0;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *buf = &so->si[n];

         if ((buf->resource == images[i].resource) &&
             (buf->format == images[i].format) &&
             (buf->access == images[i].access) &&
             !memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
            continue;

         mask |= BIT(n);
         util_copy_image_view(buf, &images[i]);

         if (buf->resource) {
            bool write = buf->access & PIPE_IMAGE_ACCESS_WRITE;

            fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
            fd_dirty_shader_resource(ctx, buf->resource, shader,
                                     FD_DIRTY_SHADER_IMAGE, write);
            so->enabled_mask |= BIT(n);

            if (write && (buf->resource->target == PIPE_BUFFER)) {
               struct fd_resource *rsc = fd_resource(buf->resource);
               util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                              buf->u.buf.offset,
                              buf->u.buf.offset + buf->u.buf.size);
            }
         } else {
            so->enabled_mask &= ~BIT(n);
         }
      }
   } else {
      mask = (BIT(count) - 1) << start;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *img = &so->si[n];

         pipe_resource_reference(&img->resource, NULL);
      }

      so->enabled_mask &= ~mask;
   }

   for (unsigned i = 0; i < unbind_num_trailing_slots; i++)
      pipe_resource_reference(&so->si[i + start + count].resource, NULL);

   so->enabled_mask &=
      ~(BITFIELD_MASK(unbind_num_trailing_slots) << (start + count));

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_IMAGE);
}

void
fd_set_framebuffer_state(struct pipe_context *pctx,
                         const struct pipe_framebuffer_state *framebuffer)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *cso;

   DBG("%ux%u, %u layers, %u samples", framebuffer->width, framebuffer->height,
       framebuffer->layers, framebuffer->samples);

   cso = &ctx->framebuffer;

   if (util_framebuffer_state_equal(cso, framebuffer))
      return;

   /* Do this *after* checking that the framebuffer state is actually
    * changing.  In the fd_blitter_clear() path, we get a pfb update
    * to restore the current pfb state, which should not trigger us
    * to flush (as that can cause the batch to be freed at a point
    * before fd_clear() returns, but after the point where it expects
    * flushes to potentially happen).
    */
   fd_context_switch_from(ctx);

   util_copy_framebuffer_state(cso, framebuffer);

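   /* Four channel-mask bits per MRT, so the mask type must be wide
    * enough to cover all color buffers:
    */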
   STATIC_ASSERT((4 * PIPE_MAX_COLOR_BUFS) == (8 * sizeof(ctx->all_mrt_channel_mask)));
   ctx->all_mrt_channel_mask = 0;

   /* Generate a bitmask of all valid channels for all MRTs.  Blend
    * state with unwritten channels essentially acts as blend enabled,
    * which disables LRZ write.  But only if the cbuf *has* the masked
    * channels, which is not known at the time the blend state is
    * created.
    */
   for (unsigned i = 0; i < framebuffer->nr_cbufs; i++) {
      if (!framebuffer->cbufs[i])
         continue;

      enum pipe_format format = framebuffer->cbufs[i]->format;
      unsigned nr = util_format_get_nr_components(format);

      ctx->all_mrt_channel_mask |= BITFIELD_MASK(nr) << (4 * i);
   }

   cso->samples = util_framebuffer_get_num_samples(cso);

   if (ctx->screen->reorder) {
      struct fd_batch *old_batch = NULL;

      fd_batch_reference(&old_batch, ctx->batch);

      if (likely(old_batch))
         fd_batch_finish_queries(old_batch);

      fd_batch_reference(&ctx->batch, NULL);
      fd_context_all_dirty(ctx);

      fd_batch_reference(&old_batch, NULL);
   } else if (ctx->batch) {
      DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
          framebuffer->cbufs[0], framebuffer->zsbuf);
      fd_batch_flush(ctx->batch);
   }

   fd_context_dirty(ctx, FD_DIRTY_FRAMEBUFFER);

   for (unsigned i = 0; i < PIPE_MAX_VIEWPORTS; i++) {
      ctx->disabled_scissor[i].minx = 0;
      ctx->disabled_scissor[i].miny = 0;
      ctx->disabled_scissor[i].maxx = cso->width - 1;
      ctx->disabled_scissor[i].maxy = cso->height - 1;
   }

   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
   update_draw_cost(ctx);
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
                       const struct pipe_poly_stipple *stipple) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stipple = *stipple;
   fd_context_dirty(ctx, FD_DIRTY_STIPPLE);
}

static void
fd_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                      unsigned num_scissors,
                      const struct pipe_scissor_state *scissor) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      unsigned idx = start_slot + i;

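      /* An empty scissor (zero width or height) is stored as an
       * inverted rect (min > max), so intersections with it stay
       * empty:
       */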
      if ((scissor[i].minx == scissor[i].maxx) ||
          (scissor[i].miny == scissor[i].maxy)) {
         ctx->scissor[idx].minx = ctx->scissor[idx].miny = 1;
         ctx->scissor[idx].maxx = ctx->scissor[idx].maxy = 0;
      } else {
         ctx->scissor[idx].minx = scissor[i].minx;
         ctx->scissor[idx].miny = scissor[i].miny;
         ctx->scissor[idx].maxx = MAX2(scissor[i].maxx, 1) - 1;
         ctx->scissor[idx].maxy = MAX2(scissor[i].maxy, 1) - 1;
      }
   }

   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
}

static void
init_scissor_states(struct pipe_context *pctx)
   in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   for (unsigned idx = 0; idx < ARRAY_SIZE(ctx->scissor); idx++) {
      ctx->scissor[idx].minx = ctx->scissor[idx].miny = 1;
      ctx->scissor[idx].maxx = ctx->scissor[idx].maxy = 0;
   }
}

static void
fd_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewports) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   for (unsigned i = 0; i < num_viewports; i++) {
      unsigned idx = start_slot + i;
      struct pipe_scissor_state *scissor = &ctx->viewport_scissor[idx];
      const struct pipe_viewport_state *viewport = &viewports[i];

      ctx->viewport[idx] = *viewport;

      /* see si_get_scissor_from_viewport(): */

      /* Convert (-1, -1) and (1, 1) from clip space into window space. */
      float minx = -viewport->scale[0] + viewport->translate[0];
      float miny = -viewport->scale[1] + viewport->translate[1];
      float maxx = viewport->scale[0] + viewport->translate[0];
      float maxy = viewport->scale[1] + viewport->translate[1];
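      /* E.g. (illustrative): a full-screen 1920x1080 viewport has
       * scale = (960, 540) and translate = (960, 540), giving
       * (minx, miny) = (0, 0) and (maxx, maxy) = (1920, 1080), which
       * becomes the inclusive scissor rect (0, 0)..(1919, 1079) below.
       */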

      /* Handle inverted viewports. */
      if (minx > maxx) {
         SWAP(minx, maxx);
      }
      if (miny > maxy) {
         SWAP(miny, maxy);
      }

      const float max_dims = ctx->screen->gen >= 4 ? 16384.f : 4096.f;

      /* Clamp, convert to integer and round up the max bounds. */
      scissor->minx = CLAMP(minx, 0.f, max_dims);
      scissor->miny = CLAMP(miny, 0.f, max_dims);
      scissor->maxx = MAX2(CLAMP(ceilf(maxx), 0.f, max_dims), 1) - 1;
      scissor->maxy = MAX2(CLAMP(ceilf(maxy), 0.f, max_dims), 1) - 1;
   }

   fd_context_dirty(ctx, FD_DIRTY_VIEWPORT);

   /* Guardband is only used on a6xx so far: */
   if (!is_a6xx(ctx->screen))
      return;

   ctx->guardband.x = ~0;
   ctx->guardband.y = ~0;

   bool is3x = is_a3xx(ctx->screen);

   for (unsigned i = 0; i < PIPE_MAX_VIEWPORTS; i++) {
      const struct pipe_viewport_state *vp = &ctx->viewport[i];

      unsigned gx = fd_calc_guardband(vp->translate[0], vp->scale[0], is3x);
      unsigned gy = fd_calc_guardband(vp->translate[1], vp->scale[1], is3x);

      ctx->guardband.x = MIN2(ctx->guardband.x, gx);
      ctx->guardband.y = MIN2(ctx->guardband.y, gy);
   }
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx, unsigned count,
                      const struct pipe_vertex_buffer *vb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
   int i;

   /* on a2xx, pitch is encoded in the vtx fetch instruction, so
    * we need to mark VTXSTATE as dirty as well to trigger patching
    * and re-emitting the vtx shader:
    */
   if (ctx->screen->gen < 3) {
      for (i = 0; i < count; i++) {
         bool new_enabled = vb && vb[i].buffer.resource;
         bool old_enabled = so->vb[i].buffer.resource != NULL;
         if (new_enabled != old_enabled) {
            fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
            break;
         }
      }
   }

   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, count,
                                true);
   so->count = util_last_bit(so->enabled_mask);

   if (!vb)
      return;

   fd_context_dirty(ctx, FD_DIRTY_VTXBUF);

   for (unsigned i = 0; i < count; i++) {
      assert(!vb[i].is_user_buffer);
      fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);
      fd_dirty_resource(ctx, vb[i].buffer.resource, FD_DIRTY_VTXBUF, false);

      /* Robust buffer access: Return undefined data (the start of the buffer)
       * instead of process termination or a GPU hang in case of overflow.
       */
      if (vb[i].buffer.resource &&
          unlikely(vb[i].buffer_offset >= vb[i].buffer.resource->width0)) {
         so->vb[i].buffer_offset = 0;
      }
   }
}

static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_blend_state *cso = hwcso;
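   /* Dual-source blending uses the fragment shader's second color
    * output (SRC1) as an extra blend input, so note when it toggles:
    */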
   bool old_is_dual = ctx->blend ? ctx->blend->rt[0].blend_enable &&
                                      util_blend_state_is_dual(ctx->blend, 0)
                                 : false;
   bool new_is_dual =
      cso ? cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) : false;
   fd_context_dirty(ctx, FD_DIRTY_BLEND);
   if (old_is_dual != new_is_dual)
      fd_context_dirty(ctx, FD_DIRTY_BLEND_DUAL);

   bool old_coherent = get_safe(ctx->blend, blend_coherent);
   bool new_coherent = get_safe(cso, blend_coherent);
   if (new_coherent != old_coherent) {
      fd_context_dirty(ctx, FD_DIRTY_BLEND_COHERENT);
   }
   ctx->blend = hwcso;
   update_draw_cost(ctx);
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
   bool discard = get_safe(ctx->rasterizer, rasterizer_discard);
   unsigned clip_plane_enable = get_safe(ctx->rasterizer, clip_plane_enable);

   ctx->rasterizer = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);

   if (ctx->rasterizer && ctx->rasterizer->scissor) {
      ctx->current_scissor = ctx->scissor;
   } else {
      ctx->current_scissor = ctx->disabled_scissor;
   }

   /* If the scissor enable bit changed we need to mark the scissor
    * state as dirty as well.
    * NOTE: we can do a shallow compare, since we only care whether it
    * changed to/from &ctx->disabled_scissor:
    */
   if (old_scissor != fd_context_get_scissor(ctx))
      fd_context_dirty(ctx, FD_DIRTY_SCISSOR);

   if (discard != get_safe(ctx->rasterizer, rasterizer_discard))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_DISCARD);

   if (clip_plane_enable != get_safe(ctx->rasterizer, clip_plane_enable))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_CLIP_PLANE_ENABLE);
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->zsa = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_ZSA);
   update_draw_cost(ctx);
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                       const struct pipe_vertex_element *elements)
{
   struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

   if (!so)
      return NULL;

   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;
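   /* Vertex strides now live in the vertex-element CSO (src_stride)
    * rather than in the vertex buffer, so derive per-buffer strides
    * here:
    */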
   for (unsigned i = 0; i < num_elements; i++)
      so->strides[elements[i].vertex_buffer_index] = elements[i].src_stride;

   return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->vtx.vtx = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
}

static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
                               struct pipe_resource *prsc,
                               unsigned buffer_offset, unsigned buffer_size)
{
   struct fd_stream_output_target *target;
   struct fd_resource *rsc = fd_resource(prsc);

   target = CALLOC_STRUCT(fd_stream_output_target);
   if (!target)
      return NULL;

   pipe_reference_init(&target->base.reference, 1);
   pipe_resource_reference(&target->base.buffer, prsc);

   target->base.context = pctx;
   target->base.buffer_offset = buffer_offset;
   target->base.buffer_size = buffer_size;

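   /* Small GPU buffer to hold the current streamout write offset, so
    * it can be saved/restored across batches:
    */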
   target->offset_buf = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(uint32_t));

   assert(rsc->b.b.target == PIPE_BUFFER);
   util_range_add(&rsc->b.b, &rsc->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   return &target->base;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
                                struct pipe_stream_output_target *target)
{
   struct fd_stream_output_target *cso = fd_stream_output_target(target);

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset_buf, NULL);

   FREE(target);
}

static void
fd_set_stream_output_targets(struct pipe_context *pctx, unsigned num_targets,
                             struct pipe_stream_output_target **targets,
                             const unsigned *offsets) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_streamout_stateobj *so = &ctx->streamout;
   unsigned i;

   assert(num_targets <= ARRAY_SIZE(so->targets));

   /* Older GPU generations need sw stats enabled for streamout
    * emulation in the VS:
    */
   if (ctx->screen->gen < 5) {
      if (num_targets && !so->num_targets) {
         ctx->stats_users++;
      } else if (so->num_targets && !num_targets) {
         ctx->stats_users--;
      }
   }

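   /* Per the gallium interface, an offset of (unsigned)-1 means
    * "append": keep the existing buffer offset rather than resetting
    * it:
    */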
   for (i = 0; i < num_targets; i++) {
      bool changed = targets[i] != so->targets[i];
      bool reset = (offsets[i] != (unsigned)-1);

      so->reset |= (reset << i);

      if (targets[i]) {
         fd_resource_set_usage(targets[i]->buffer, FD_DIRTY_STREAMOUT);
         fd_dirty_resource(ctx, targets[i]->buffer, FD_DIRTY_STREAMOUT, true);

         struct fd_stream_output_target *target = fd_stream_output_target(targets[i]);
         fd_resource_set_usage(target->offset_buf, FD_DIRTY_STREAMOUT);
         fd_dirty_resource(ctx, target->offset_buf, FD_DIRTY_STREAMOUT, true);
      }

      if (!changed && !reset)
         continue;

      /* Note that all SO targets will be reset at once at a
       * BeginTransformFeedback().
       */
      if (reset) {
         so->offsets[i] = offsets[i];
         ctx->streamout.verts_written = 0;
      }

      pipe_so_target_reference(&so->targets[i], targets[i]);
   }

   for (; i < so->num_targets; i++) {
      pipe_so_target_reference(&so->targets[i], NULL);
   }

   so->num_targets = num_targets;

   fd_context_dirty(ctx, FD_DIRTY_STREAMOUT);
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->compute = state;
   fd_context_dirty_shader(ctx, PIPE_SHADER_COMPUTE, FD_DIRTY_SHADER_PROG);
}

/* TODO pipe_context::set_compute_resources() should DIAF and clover
 * should be updated to use pipe_context::set_constant_buffer() and
 * pipe_context::set_shader_images().  Until then just directly frob
 * the UBO/image state to avoid the rest of the driver needing to
 * know about this bastard api..
 */
static void
fd_set_compute_resources(struct pipe_context *pctx, unsigned start,
                         unsigned count, struct pipe_surface **prscs) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[PIPE_SHADER_COMPUTE];

   for (unsigned i = 0; i < count; i++) {
      const uint32_t index = i + start + 1;   /* UBOs start at index 1 */

      if (!prscs) {
         util_copy_constant_buffer(&so->cb[index], NULL, false);
         so->enabled_mask &= ~(1 << index);
      } else if (prscs[i]->format == PIPE_FORMAT_NONE) {
         struct pipe_constant_buffer cb = {
               .buffer = prscs[i]->texture,
         };
         util_copy_constant_buffer(&so->cb[index], &cb, false);
         so->enabled_mask |= (1 << index);
      } else {
         // TODO images
         unreachable("finishme");
      }
   }
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx, unsigned first, unsigned count,
                      struct pipe_resource **prscs, uint32_t **handles) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
   unsigned mask = 0;

   if (prscs) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;

         mask |= BIT(n);

         pipe_resource_reference(&so->buf[n], prscs[i]);

         if (so->buf[n]) {
            struct fd_resource *rsc = fd_resource(so->buf[n]);
            uint32_t offset = *handles[i];
            uint64_t iova = fd_bo_get_iova(rsc->bo) + offset;

            /* Yes, really, despite what the type implies: handles[i]
             * points at 64 bits of storage.  The incoming value is a
             * 32-bit offset, and the full 64-bit GPU address is
             * written back through the same pointer:
             */
            memcpy(handles[i], &iova, sizeof(iova));
         }

         if (prscs[i])
            so->enabled_mask |= BIT(n);
         else
            so->enabled_mask &= ~BIT(n);
      }
   } else {
      mask = (BIT(count) - 1) << first;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;
         pipe_resource_reference(&so->buf[n], NULL);
      }

      so->enabled_mask &= ~mask;
   }
}

void
fd_state_init(struct pipe_context *pctx)
{
   pctx->set_blend_color = fd_set_blend_color;
   pctx->set_stencil_ref = fd_set_stencil_ref;
   pctx->set_clip_state = fd_set_clip_state;
   pctx->set_sample_mask = fd_set_sample_mask;
   pctx->set_min_samples = fd_set_min_samples;
   pctx->set_constant_buffer = fd_set_constant_buffer;
   pctx->set_shader_buffers = fd_set_shader_buffers;
   pctx->set_shader_images = fd_set_shader_images;
   pctx->set_framebuffer_state = fd_set_framebuffer_state;
   pctx->set_sample_locations = fd_set_sample_locations;
   pctx->set_polygon_stipple = fd_set_polygon_stipple;
   pctx->set_scissor_states = fd_set_scissor_states;
   pctx->set_viewport_states = fd_set_viewport_states;

   pctx->set_vertex_buffers = fd_set_vertex_buffers;

   pctx->bind_blend_state = fd_blend_state_bind;
   pctx->delete_blend_state = fd_blend_state_delete;

   pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
   pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

   pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
   pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

   if (!pctx->create_vertex_elements_state)
      pctx->create_vertex_elements_state = fd_vertex_state_create;
   pctx->delete_vertex_elements_state = fd_vertex_state_delete;
   pctx->bind_vertex_elements_state = fd_vertex_state_bind;

   pctx->create_stream_output_target = fd_create_stream_output_target;
   pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
   pctx->set_stream_output_targets = fd_set_stream_output_targets;

   if (has_compute(fd_screen(pctx->screen))) {
      pctx->bind_compute_state = fd_bind_compute_state;
      pctx->set_compute_resources = fd_set_compute_resources;
      pctx->set_global_binding = fd_set_global_binding;
   }

   init_scissor_states(pctx);
}