1 /*
2 * Copyright (C) 2019 Collabora, Ltd.
3 * Copyright (C) 2019 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors (Collabora):
25 * Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
26 *
27 */
28
29 #include "pan_context.h"
30 #include "panfrost-quirks.h"
31 #include "pan_bo.h"
32 #include "pan_shader.h"
33 #include "util/u_memory.h"
34 #include "nir_serialize.h"
35
36 /* Compute CSOs are tracked like graphics shader CSOs, but are
37 * considerably simpler. We do not implement multiple
38 * variants/keying. So the CSO create function just goes ahead and
39 * compiles the thing. */
40
41 static void *
panfrost_create_compute_state(struct pipe_context * pctx,const struct pipe_compute_state * cso)42 panfrost_create_compute_state(
43 struct pipe_context *pctx,
44 const struct pipe_compute_state *cso)
45 {
46 struct panfrost_context *ctx = pan_context(pctx);
47 struct panfrost_screen *screen = pan_screen(pctx->screen);
48
49 struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
50 so->cbase = *cso;
51 so->is_compute = true;
52
53 struct panfrost_shader_state *v = calloc(1, sizeof(*v));
54 so->variants = v;
55
56 so->variant_count = 1;
57 so->active_variant = 0;
58
59 if (cso->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
60 struct blob_reader reader;
61 const struct pipe_binary_program_header *hdr = cso->prog;
62
63 blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
64
65 const struct nir_shader_compiler_options *options =
66 screen->vtbl.get_compiler_options();
67
68 so->cbase.prog = nir_deserialize(NULL, options, &reader);
69 so->cbase.ir_type = PIPE_SHADER_IR_NIR;
70 }
71
72 panfrost_shader_compile(pctx->screen, &ctx->shaders, &ctx->descs,
73 so->cbase.ir_type, so->cbase.prog, MESA_SHADER_COMPUTE,
74 v);
75
76 /* There are no variants so we won't need the NIR again */
77 ralloc_free((void *)so->cbase.prog);
78 so->cbase.prog = NULL;
79
80 return so;
81 }
82
83 static void
panfrost_bind_compute_state(struct pipe_context * pipe,void * cso)84 panfrost_bind_compute_state(struct pipe_context *pipe, void *cso)
85 {
86 struct panfrost_context *ctx = pan_context(pipe);
87 ctx->shader[PIPE_SHADER_COMPUTE] = cso;
88 }
89
90 static void
panfrost_delete_compute_state(struct pipe_context * pipe,void * cso)91 panfrost_delete_compute_state(struct pipe_context *pipe, void *cso)
92 {
93 struct panfrost_shader_variants *so =
94 (struct panfrost_shader_variants *)cso;
95
96 free(so->variants);
97 free(cso);
98 }
99
static void
panfrost_set_compute_resources(struct pipe_context *pctx,
                        unsigned start, unsigned count,
                        struct pipe_surface **resources)
{
        /* TODO: unimplemented stub */
}
107
108 static void
panfrost_set_global_binding(struct pipe_context * pctx,unsigned first,unsigned count,struct pipe_resource ** resources,uint32_t ** handles)109 panfrost_set_global_binding(struct pipe_context *pctx,
110 unsigned first, unsigned count,
111 struct pipe_resource **resources,
112 uint32_t **handles)
113 {
114 if (!resources)
115 return;
116
117 struct panfrost_context *ctx = pan_context(pctx);
118 struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
119
120 for (unsigned i = first; i < first + count; ++i) {
121 struct panfrost_resource *rsrc = pan_resource(resources[i]);
122 panfrost_batch_write_rsrc(batch, rsrc, PIPE_SHADER_COMPUTE);
123
124 util_range_add(&rsrc->base, &rsrc->valid_buffer_range,
125 0, rsrc->base.width0);
126
127 /* The handle points to uint32_t, but space is allocated for 64 bits */
128 memcpy(handles[i], &rsrc->image.data.bo->ptr.gpu, sizeof(mali_ptr));
129 }
130 }
131
static void
panfrost_memory_barrier(struct pipe_context *pctx, unsigned flags)
{
        /* Conservative: flush every pending batch regardless of `flags`.
         * TODO: Be smart and only flush the minimum needed, maybe emitting a
         * cache flush job if that would help. */
        panfrost_flush_all_batches(pan_context(pctx), "Memory barrier");
}
139
140 void
panfrost_compute_context_init(struct pipe_context * pctx)141 panfrost_compute_context_init(struct pipe_context *pctx)
142 {
143 pctx->create_compute_state = panfrost_create_compute_state;
144 pctx->bind_compute_state = panfrost_bind_compute_state;
145 pctx->delete_compute_state = panfrost_delete_compute_state;
146
147 pctx->set_compute_resources = panfrost_set_compute_resources;
148 pctx->set_global_binding = panfrost_set_global_binding;
149
150 pctx->memory_barrier = panfrost_memory_barrier;
151 }
152