/**********************************************************
 * Copyright 2022-2023 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "compiler/nir/nir.h"
#include "compiler/glsl/gl_nir.h"
#include "nir/nir_to_tgsi.h"

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_bitmask.h"
#include "tgsi/tgsi_parse.h"

#include "svga_context.h"
#include "svga_cmd.h"
#include "svga_debug.h"
#include "svga_shader.h"
#include "svga_streamout.h"
#include "svga_resource_buffer.h"
#include "svga_tgsi.h"

/**
 * Create the compute program.
 */
static void *
svga_create_compute_state(struct pipe_context *pipe,
                          const struct pipe_compute_state *templ)
{
   struct svga_context *svga = svga_context(pipe);

   struct svga_compute_shader *cs = CALLOC_STRUCT(svga_compute_shader);
   nir_shader *nir = (nir_shader *)templ->prog;

   if (!cs)
      return NULL;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CREATECS);

   assert(templ->ir_type == PIPE_SHADER_IR_NIR);
   /* nir_to_tgsi requires lowered images */
   NIR_PASS_V(nir, gl_nir_lower_images, false);

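   /* The svga shader compiler consumes TGSI, so convert the NIR shader
    * into TGSI tokens before going any further.
    */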
   cs->base.tokens = nir_to_tgsi((void *)nir, pipe->screen);

   struct svga_shader *shader = &cs->base;
   shader->id = svga->debug.shader_id++;
   shader->type = PIPE_SHADER_IR_TGSI;
   shader->stage = PIPE_SHADER_COMPUTE;

   /* Collect shader basic info */
   svga_tgsi_scan_shader(&cs->base);

   cs->shared_mem_size = templ->static_shared_mem;

   SVGA_STATS_TIME_POP(svga_sws(svga));
   return cs;
}


/**
 * Bind the compute program.
 */
static void
svga_bind_compute_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_compute_shader *cs = (struct svga_compute_shader *)shader;

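   /* Remember the new shader and mark the compute state dirty so that a
    * matching shader variant is selected and emitted at the next dispatch.
    */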
   svga->curr.cs = cs;
   svga->dirty |= SVGA_NEW_CS;

   /* Check if the shader uses samplers */
   svga_set_curr_shader_use_samplers_flag(svga, PIPE_SHADER_COMPUTE,
                                          svga_shader_use_samplers(&cs->base));
}


/**
 * Delete the compute program.
 */
static void
svga_delete_compute_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_compute_shader *cs = (struct svga_compute_shader *)shader;
   struct svga_compute_shader *next_cs;
   struct svga_shader_variant *variant, *tmp;

   svga_hwtnl_flush_retry(svga);

   /* Free the list of compute shaders */
   while (cs) {
      next_cs = (struct svga_compute_shader *)cs->base.next;

      for (variant = cs->base.variants; variant; variant = tmp) {
         tmp = variant->next;

         /* Check if deleting currently bound shader */
         if (variant == svga->state.hw_draw.cs) {
            SVGA_RETRY(svga, svga_set_shader(svga, SVGA3D_SHADERTYPE_CS, NULL));
            svga->state.hw_draw.cs = NULL;
         }

         svga_destroy_shader_variant(svga, variant);
      }

      FREE((void *)cs->base.tokens);
      FREE(cs);
      cs = next_cs;
   }
}


/**
 * Bind an array of shader resources that will be used by the
 * compute program. Any resources that were previously bound to
 * the specified range will be unbound after this call.
 */
static void
svga_set_compute_resources(struct pipe_context *pipe,
                           unsigned start, unsigned count,
                           struct pipe_surface **resources)
{
   //TODO
   return;
}


/**
 * Bind an array of buffers to be mapped into the address space of
 * the GLOBAL resource. Any buffers that were previously bound
 * between [first, first + count - 1] are unbound after this call.
 */
static void
svga_set_global_binding(struct pipe_context *pipe,
                        unsigned first, unsigned count,
                        struct pipe_resource **resources,
                        uint32_t **handles)
{
   //TODO
   return;
}

/**
 * Validate the resources referenced by the currently bound compute
 * shader so that they are ready for the upcoming dispatch.
 */
static void
svga_validate_compute_resources(struct svga_context *svga)
{
   /* validate sampler view resources */
   SVGA_RETRY(svga,
              svga_validate_sampler_resources(svga, SVGA_PIPE_COMPUTE));

   /* validate constant buffer resources */
   SVGA_RETRY(svga,
              svga_validate_constant_buffers(svga, SVGA_PIPE_COMPUTE));

   /* validate image view resources */
   SVGA_RETRY(svga,
              svga_validate_image_views(svga, SVGA_PIPE_COMPUTE));

   /* validate shader buffer resources */
   SVGA_RETRY(svga,
              svga_validate_shader_buffers(svga, SVGA_PIPE_COMPUTE));
}


/**
 * Launch the compute kernel starting from instruction pc of the
 * currently bound compute program.
 */
static void
svga_launch_grid(struct pipe_context *pipe,
                 const struct pipe_grid_info *info)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_context *swc = svga->swc;

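   /* Compute shaders are only supported on GL4.3-capable devices. */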
   assert(svga_have_gl43(svga));

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_LAUNCHGRID);

   if (info->indirect) {
      svga->curr.grid_info.indirect = info->indirect;
   }

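   /* Bring the compute shader variant and related hardware state up to
    * date before emitting the dispatch command.
    */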
   svga_update_compute_state(svga);

   /* validate compute resources */
   svga_validate_compute_resources(svga);

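   /* For an indirect dispatch the grid dimensions live in a GPU buffer:
    * get a winsys handle for that buffer and emit a DispatchIndirect
    * command that reads the parameters at the given offset.  Otherwise
    * emit a direct Dispatch with the grid size from the launch info.
    */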
   if (info->indirect) {
      struct svga_winsys_surface *indirect_surf;
      indirect_surf = svga_buffer_handle(svga, info->indirect,
                                         PIPE_BIND_COMMAND_ARGS_BUFFER);
      SVGA_RETRY(svga, SVGA3D_sm5_DispatchIndirect(swc, indirect_surf,
                                                   info->indirect_offset));
   }
   else {
      svga->curr.grid_info.size[0] = info->grid[0];
      svga->curr.grid_info.size[1] = info->grid[1];
      svga->curr.grid_info.size[2] = info->grid[2];

      SVGA_RETRY(svga, SVGA3D_sm5_Dispatch(swc, info->grid));
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
   return;
}


/**
 * Initialize the compute interface function pointers.
 */
void
svga_init_cs_functions(struct svga_context *svga)
{
   svga->pipe.create_compute_state = svga_create_compute_state;
   svga->pipe.bind_compute_state = svga_bind_compute_state;
   svga->pipe.delete_compute_state = svga_delete_compute_state;
   svga->pipe.set_compute_resources = svga_set_compute_resources;
   svga->pipe.set_global_binding = svga_set_global_binding;
   svga->pipe.launch_grid = svga_launch_grid;
}