• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**************************************************************************
2  *
3  * Copyright 2019 Red Hat.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included
14  * in all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  *
24  **************************************************************************/
25 #include "util/u_memory.h"
26 #include "util/simple_list.h"
27 #include "util/os_time.h"
28 #include "util/u_dump.h"
29 #include "util/u_string.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "gallivm/lp_bld_const.h"
33 #include "gallivm/lp_bld_debug.h"
34 #include "gallivm/lp_bld_intr.h"
35 #include "gallivm/lp_bld_flow.h"
36 #include "gallivm/lp_bld_gather.h"
37 #include "gallivm/lp_bld_coro.h"
38 #include "gallivm/lp_bld_nir.h"
39 #include "lp_state_cs.h"
40 #include "lp_context.h"
41 #include "lp_debug.h"
42 #include "lp_state.h"
43 #include "lp_perf.h"
44 #include "lp_screen.h"
45 #include "lp_memory.h"
46 #include "lp_query.h"
47 #include "lp_cs_tpool.h"
48 #include "frontend/sw_winsys.h"
49 #include "nir/nir_to_tgsi_info.h"
50 #include "util/mesa-sha1.h"
51 #include "nir_serialize.h"
52 
/** Compute shader number (for debugging) */
static unsigned cs_no = 0;
55 
/**
 * Per-dispatch description of a compute job handed to the thread pool.
 */
struct lp_cs_job_info {
   unsigned grid_size[3];      /* number of blocks in each grid dimension */
   unsigned block_size[3];     /* threads per block in each dimension */
   unsigned req_local_mem;     /* bytes of shared (local) memory required */
   unsigned work_dim;          /* work dimension (1, 2 or 3 presumably — matches work_dim_arg in generate_compute) */
   struct lp_cs_exec *current; /* execution state for the variant being run */
};
63 
64 static void
generate_compute(struct llvmpipe_context * lp,struct lp_compute_shader * shader,struct lp_compute_shader_variant * variant)65 generate_compute(struct llvmpipe_context *lp,
66                  struct lp_compute_shader *shader,
67                  struct lp_compute_shader_variant *variant)
68 {
69    struct gallivm_state *gallivm = variant->gallivm;
70    const struct lp_compute_shader_variant_key *key = &variant->key;
71    char func_name[64], func_name_coro[64];
72    LLVMTypeRef arg_types[17];
73    LLVMTypeRef func_type, coro_func_type;
74    LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
75    LLVMValueRef context_ptr;
76    LLVMValueRef x_size_arg, y_size_arg, z_size_arg;
77    LLVMValueRef grid_x_arg, grid_y_arg, grid_z_arg;
78    LLVMValueRef grid_size_x_arg, grid_size_y_arg, grid_size_z_arg;
79    LLVMValueRef work_dim_arg, thread_data_ptr;
80    LLVMBasicBlockRef block;
81    LLVMBuilderRef builder;
82    struct lp_build_sampler_soa *sampler;
83    struct lp_build_image_soa *image;
84    LLVMValueRef function, coro;
85    struct lp_type cs_type;
86    unsigned i;
87 
88    /*
89     * This function has two parts
90     * a) setup the coroutine execution environment loop.
91     * b) build the compute shader llvm for use inside the coroutine.
92     */
93    assert(lp_native_vector_width / 32 >= 4);
94 
95    memset(&cs_type, 0, sizeof cs_type);
96    cs_type.floating = TRUE;      /* floating point values */
97    cs_type.sign = TRUE;          /* values are signed */
98    cs_type.norm = FALSE;         /* values are not limited to [0,1] or [-1,1] */
99    cs_type.width = 32;           /* 32-bit float */
100    cs_type.length = MIN2(lp_native_vector_width / 32, 16); /* n*4 elements per vector */
101    snprintf(func_name, sizeof(func_name), "cs_variant");
102 
103    snprintf(func_name_coro, sizeof(func_name), "cs_co_variant");
104 
105    arg_types[0] = variant->jit_cs_context_ptr_type;       /* context */
106    arg_types[1] = int32_type;                          /* block_x_size */
107    arg_types[2] = int32_type;                          /* block_y_size */
108    arg_types[3] = int32_type;                          /* block_z_size */
109    arg_types[4] = int32_type;                          /* grid_x */
110    arg_types[5] = int32_type;                          /* grid_y */
111    arg_types[6] = int32_type;                          /* grid_z */
112    arg_types[7] = int32_type;                          /* grid_size_x */
113    arg_types[8] = int32_type;                          /* grid_size_y */
114    arg_types[9] = int32_type;                          /* grid_size_z */
115    arg_types[10] = int32_type;                         /* work dim */
116    arg_types[11] = variant->jit_cs_thread_data_ptr_type;  /* per thread data */
117    arg_types[12] = int32_type;                         /* coro only - num X loops */
118    arg_types[13] = int32_type;                         /* coro only - partials */
119    arg_types[14] = int32_type;                         /* coro block_x_size */
120    arg_types[15] = int32_type;                         /* coro block_y_size */
121    arg_types[16] = int32_type;                         /* coro block_z_size */
122    func_type = LLVMFunctionType(LLVMVoidTypeInContext(gallivm->context),
123                                 arg_types, ARRAY_SIZE(arg_types) - 5, 0);
124 
125    coro_func_type = LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0),
126                                      arg_types, ARRAY_SIZE(arg_types), 0);
127 
128    function = LLVMAddFunction(gallivm->module, func_name, func_type);
129    LLVMSetFunctionCallConv(function, LLVMCCallConv);
130 
131    coro = LLVMAddFunction(gallivm->module, func_name_coro, coro_func_type);
132    LLVMSetFunctionCallConv(coro, LLVMCCallConv);
133 
134    variant->function = function;
135 
136    for(i = 0; i < ARRAY_SIZE(arg_types); ++i) {
137       if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind) {
138          lp_add_function_attr(coro, i + 1, LP_FUNC_ATTR_NOALIAS);
139          lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
140       }
141    }
142 
143    lp_build_coro_declare_malloc_hooks(gallivm);
144 
145    if (variant->gallivm->cache->data_size)
146       return;
147 
148    context_ptr  = LLVMGetParam(function, 0);
149    x_size_arg = LLVMGetParam(function, 1);
150    y_size_arg = LLVMGetParam(function, 2);
151    z_size_arg = LLVMGetParam(function, 3);
152    grid_x_arg = LLVMGetParam(function, 4);
153    grid_y_arg = LLVMGetParam(function, 5);
154    grid_z_arg = LLVMGetParam(function, 6);
155    grid_size_x_arg = LLVMGetParam(function, 7);
156    grid_size_y_arg = LLVMGetParam(function, 8);
157    grid_size_z_arg = LLVMGetParam(function, 9);
158    work_dim_arg = LLVMGetParam(function, 10);
159    thread_data_ptr  = LLVMGetParam(function, 11);
160 
161    lp_build_name(context_ptr, "context");
162    lp_build_name(x_size_arg, "x_size");
163    lp_build_name(y_size_arg, "y_size");
164    lp_build_name(z_size_arg, "z_size");
165    lp_build_name(grid_x_arg, "grid_x");
166    lp_build_name(grid_y_arg, "grid_y");
167    lp_build_name(grid_z_arg, "grid_z");
168    lp_build_name(grid_size_x_arg, "grid_size_x");
169    lp_build_name(grid_size_y_arg, "grid_size_y");
170    lp_build_name(grid_size_z_arg, "grid_size_z");
171    lp_build_name(work_dim_arg, "work_dim");
172    lp_build_name(thread_data_ptr, "thread_data");
173 
174    block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
175    builder = gallivm->builder;
176    assert(builder);
177    LLVMPositionBuilderAtEnd(builder, block);
178    sampler = lp_llvm_sampler_soa_create(key->samplers, key->nr_samplers);
179    image = lp_llvm_image_soa_create(lp_cs_variant_key_images(key), key->nr_images);
180 
181    struct lp_build_loop_state loop_state[4];
182    LLVMValueRef num_x_loop;
183    LLVMValueRef vec_length = lp_build_const_int32(gallivm, cs_type.length);
184    num_x_loop = LLVMBuildAdd(gallivm->builder, x_size_arg, vec_length, "");
185    num_x_loop = LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), "");
186    num_x_loop = LLVMBuildUDiv(gallivm->builder, num_x_loop, vec_length, "");
187    LLVMValueRef partials = LLVMBuildURem(gallivm->builder, x_size_arg, vec_length, "");
188 
189    LLVMValueRef coro_num_hdls = LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, "");
190    coro_num_hdls = LLVMBuildMul(gallivm->builder, coro_num_hdls, z_size_arg, "");
191 
192    LLVMTypeRef hdl_ptr_type = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0);
193    LLVMValueRef coro_hdls = LLVMBuildArrayAlloca(gallivm->builder, hdl_ptr_type, coro_num_hdls, "coro_hdls");
194 
195    unsigned end_coroutine = INT_MAX;
196 
197    /*
198     * This is the main coroutine execution loop. It iterates over the dimensions
199     * and calls the coroutine main entrypoint on the first pass, but in subsequent
200     * passes it checks if the coroutine has completed and resumes it if not.
201     */
202    /* take x_width - round up to type.length width */
203    lp_build_loop_begin(&loop_state[3], gallivm,
204                        lp_build_const_int32(gallivm, 0)); /* coroutine reentry loop */
205    lp_build_loop_begin(&loop_state[2], gallivm,
206                        lp_build_const_int32(gallivm, 0)); /* z loop */
207    lp_build_loop_begin(&loop_state[1], gallivm,
208                        lp_build_const_int32(gallivm, 0)); /* y loop */
209    lp_build_loop_begin(&loop_state[0], gallivm,
210                        lp_build_const_int32(gallivm, 0)); /* x loop */
211    {
212       LLVMValueRef args[17];
213       args[0] = context_ptr;
214       args[1] = loop_state[0].counter;
215       args[2] = loop_state[1].counter;
216       args[3] = loop_state[2].counter;
217       args[4] = grid_x_arg;
218       args[5] = grid_y_arg;
219       args[6] = grid_z_arg;
220       args[7] = grid_size_x_arg;
221       args[8] = grid_size_y_arg;
222       args[9] = grid_size_z_arg;
223       args[10] = work_dim_arg;
224       args[11] = thread_data_ptr;
225       args[12] = num_x_loop;
226       args[13] = partials;
227       args[14] = x_size_arg;
228       args[15] = y_size_arg;
229       args[16] = z_size_arg;
230 
231       /* idx = (z * (size_x * size_y) + y * size_x + x */
232       LLVMValueRef coro_hdl_idx = LLVMBuildMul(gallivm->builder, loop_state[2].counter,
233                                                LLVMBuildMul(gallivm->builder, num_x_loop, y_size_arg, ""), "");
234       coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
235                                   LLVMBuildMul(gallivm->builder, loop_state[1].counter,
236                                                num_x_loop, ""), "");
237       coro_hdl_idx = LLVMBuildAdd(gallivm->builder, coro_hdl_idx,
238                                   loop_state[0].counter, "");
239 
240       LLVMValueRef coro_entry = LLVMBuildGEP(gallivm->builder, coro_hdls, &coro_hdl_idx, 1, "");
241 
242       LLVMValueRef coro_hdl = LLVMBuildLoad(gallivm->builder, coro_entry, "coro_hdl");
243 
244       struct lp_build_if_state ifstate;
245       LLVMValueRef cmp = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, loop_state[3].counter,
246                                        lp_build_const_int32(gallivm, 0), "");
247       /* first time here - call the coroutine function entry point */
248       lp_build_if(&ifstate, gallivm, cmp);
249       LLVMValueRef coro_ret = LLVMBuildCall(gallivm->builder, coro, args, 17, "");
250       LLVMBuildStore(gallivm->builder, coro_ret, coro_entry);
251       lp_build_else(&ifstate);
252       /* subsequent calls for this invocation - check if done. */
253       LLVMValueRef coro_done = lp_build_coro_done(gallivm, coro_hdl);
254       struct lp_build_if_state ifstate2;
255       lp_build_if(&ifstate2, gallivm, coro_done);
256       /* if done destroy and force loop exit */
257       lp_build_coro_destroy(gallivm, coro_hdl);
258       lp_build_loop_force_set_counter(&loop_state[3], lp_build_const_int32(gallivm, end_coroutine - 1));
259       lp_build_else(&ifstate2);
260       /* otherwise resume the coroutine */
261       lp_build_coro_resume(gallivm, coro_hdl);
262       lp_build_endif(&ifstate2);
263       lp_build_endif(&ifstate);
264       lp_build_loop_force_reload_counter(&loop_state[3]);
265    }
266    lp_build_loop_end_cond(&loop_state[0],
267                           num_x_loop,
268                           NULL,  LLVMIntUGE);
269    lp_build_loop_end_cond(&loop_state[1],
270                           y_size_arg,
271                           NULL,  LLVMIntUGE);
272    lp_build_loop_end_cond(&loop_state[2],
273                           z_size_arg,
274                           NULL,  LLVMIntUGE);
275    lp_build_loop_end_cond(&loop_state[3],
276                           lp_build_const_int32(gallivm, end_coroutine),
277                           NULL, LLVMIntEQ);
278    LLVMBuildRetVoid(builder);
279 
280    /* This is stage (b) - generate the compute shader code inside the coroutine. */
281    LLVMValueRef block_x_size_arg, block_y_size_arg, block_z_size_arg;
282    context_ptr  = LLVMGetParam(coro, 0);
283    x_size_arg = LLVMGetParam(coro, 1);
284    y_size_arg = LLVMGetParam(coro, 2);
285    z_size_arg = LLVMGetParam(coro, 3);
286    grid_x_arg = LLVMGetParam(coro, 4);
287    grid_y_arg = LLVMGetParam(coro, 5);
288    grid_z_arg = LLVMGetParam(coro, 6);
289    grid_size_x_arg = LLVMGetParam(coro, 7);
290    grid_size_y_arg = LLVMGetParam(coro, 8);
291    grid_size_z_arg = LLVMGetParam(coro, 9);
292    work_dim_arg = LLVMGetParam(coro, 10);
293    thread_data_ptr  = LLVMGetParam(coro, 11);
294    num_x_loop = LLVMGetParam(coro, 12);
295    partials = LLVMGetParam(coro, 13);
296    block_x_size_arg = LLVMGetParam(coro, 14);
297    block_y_size_arg = LLVMGetParam(coro, 15);
298    block_z_size_arg = LLVMGetParam(coro, 16);
299    block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "entry");
300    LLVMPositionBuilderAtEnd(builder, block);
301    {
302       LLVMValueRef consts_ptr, num_consts_ptr;
303       LLVMValueRef ssbo_ptr, num_ssbo_ptr;
304       LLVMValueRef shared_ptr;
305       LLVMValueRef kernel_args_ptr;
306       struct lp_build_mask_context mask;
307       struct lp_bld_tgsi_system_values system_values;
308 
309       memset(&system_values, 0, sizeof(system_values));
310       consts_ptr = lp_jit_cs_context_constants(gallivm, context_ptr);
311       num_consts_ptr = lp_jit_cs_context_num_constants(gallivm, context_ptr);
312       ssbo_ptr = lp_jit_cs_context_ssbos(gallivm, context_ptr);
313       num_ssbo_ptr = lp_jit_cs_context_num_ssbos(gallivm, context_ptr);
314       kernel_args_ptr = lp_jit_cs_context_kernel_args(gallivm, context_ptr);
315 
316       shared_ptr = lp_jit_cs_thread_data_shared(gallivm, thread_data_ptr);
317 
318       /* these are coroutine entrypoint necessities */
319       LLVMValueRef coro_id = lp_build_coro_id(gallivm);
320       LLVMValueRef coro_hdl = lp_build_coro_begin_alloc_mem(gallivm, coro_id);
321 
322       LLVMValueRef has_partials = LLVMBuildICmp(gallivm->builder, LLVMIntNE, partials, lp_build_const_int32(gallivm, 0), "");
323       LLVMValueRef tid_vals[3];
324       LLVMValueRef tids_x[LP_MAX_VECTOR_LENGTH], tids_y[LP_MAX_VECTOR_LENGTH], tids_z[LP_MAX_VECTOR_LENGTH];
325       LLVMValueRef base_val = LLVMBuildMul(gallivm->builder, x_size_arg, vec_length, "");
326       for (i = 0; i < cs_type.length; i++) {
327          tids_x[i] = LLVMBuildAdd(gallivm->builder, base_val, lp_build_const_int32(gallivm, i), "");
328          tids_y[i] = y_size_arg;
329          tids_z[i] = z_size_arg;
330       }
331       tid_vals[0] = lp_build_gather_values(gallivm, tids_x, cs_type.length);
332       tid_vals[1] = lp_build_gather_values(gallivm, tids_y, cs_type.length);
333       tid_vals[2] = lp_build_gather_values(gallivm, tids_z, cs_type.length);
334       system_values.thread_id = LLVMGetUndef(LLVMArrayType(LLVMVectorType(int32_type, cs_type.length), 3));
335       for (i = 0; i < 3; i++)
336          system_values.thread_id = LLVMBuildInsertValue(builder, system_values.thread_id, tid_vals[i], i, "");
337 
338       LLVMValueRef gtids[3] = { grid_x_arg, grid_y_arg, grid_z_arg };
339       system_values.block_id = LLVMGetUndef(LLVMVectorType(int32_type, 3));
340       for (i = 0; i < 3; i++)
341          system_values.block_id = LLVMBuildInsertElement(builder, system_values.block_id, gtids[i], lp_build_const_int32(gallivm, i), "");
342 
343       LLVMValueRef gstids[3] = { grid_size_x_arg, grid_size_y_arg, grid_size_z_arg };
344       system_values.grid_size = LLVMGetUndef(LLVMVectorType(int32_type, 3));
345       for (i = 0; i < 3; i++)
346          system_values.grid_size = LLVMBuildInsertElement(builder, system_values.grid_size, gstids[i], lp_build_const_int32(gallivm, i), "");
347 
348       system_values.work_dim = work_dim_arg;
349 
350       LLVMValueRef bsize[3] = { block_x_size_arg, block_y_size_arg, block_z_size_arg };
351       system_values.block_size = LLVMGetUndef(LLVMVectorType(int32_type, 3));
352       for (i = 0; i < 3; i++)
353          system_values.block_size = LLVMBuildInsertElement(builder, system_values.block_size, bsize[i], lp_build_const_int32(gallivm, i), "");
354 
355       LLVMValueRef last_x_loop = LLVMBuildICmp(gallivm->builder, LLVMIntEQ, x_size_arg, LLVMBuildSub(gallivm->builder, num_x_loop, lp_build_const_int32(gallivm, 1), ""), "");
356       LLVMValueRef use_partial_mask = LLVMBuildAnd(gallivm->builder, last_x_loop, has_partials, "");
357       struct lp_build_if_state if_state;
358       LLVMValueRef mask_val = lp_build_alloca(gallivm, LLVMVectorType(int32_type, cs_type.length), "mask");
359       LLVMValueRef full_mask_val = lp_build_const_int_vec(gallivm, cs_type, ~0);
360       LLVMBuildStore(gallivm->builder, full_mask_val, mask_val);
361 
362       lp_build_if(&if_state, gallivm, use_partial_mask);
363       struct lp_build_loop_state mask_loop_state;
364       lp_build_loop_begin(&mask_loop_state, gallivm, partials);
365       LLVMValueRef tmask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
366       tmask_val = LLVMBuildInsertElement(gallivm->builder, tmask_val, lp_build_const_int32(gallivm, 0), mask_loop_state.counter, "");
367       LLVMBuildStore(gallivm->builder, tmask_val, mask_val);
368       lp_build_loop_end_cond(&mask_loop_state, vec_length, NULL, LLVMIntUGE);
369       lp_build_endif(&if_state);
370 
371       mask_val = LLVMBuildLoad(gallivm->builder, mask_val, "");
372       lp_build_mask_begin(&mask, gallivm, cs_type, mask_val);
373 
374       struct lp_build_coro_suspend_info coro_info;
375 
376       LLVMBasicBlockRef sus_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "suspend");
377       LLVMBasicBlockRef clean_block = LLVMAppendBasicBlockInContext(gallivm->context, coro, "cleanup");
378 
379       coro_info.suspend = sus_block;
380       coro_info.cleanup = clean_block;
381 
382       struct lp_build_tgsi_params params;
383       memset(&params, 0, sizeof(params));
384 
385       params.type = cs_type;
386       params.mask = &mask;
387       params.consts_ptr = consts_ptr;
388       params.const_sizes_ptr = num_consts_ptr;
389       params.system_values = &system_values;
390       params.context_ptr = context_ptr;
391       params.sampler = sampler;
392       params.info = &shader->info.base;
393       params.ssbo_ptr = ssbo_ptr;
394       params.ssbo_sizes_ptr = num_ssbo_ptr;
395       params.image = image;
396       params.shared_ptr = shared_ptr;
397       params.coro = &coro_info;
398       params.kernel_args = kernel_args_ptr;
399 
400       if (shader->base.type == PIPE_SHADER_IR_TGSI)
401          lp_build_tgsi_soa(gallivm, shader->base.tokens, &params, NULL);
402       else
403          lp_build_nir_soa(gallivm, shader->base.ir.nir, &params,
404                           NULL);
405 
406       mask_val = lp_build_mask_end(&mask);
407 
408       lp_build_coro_suspend_switch(gallivm, &coro_info, NULL, true);
409       LLVMPositionBuilderAtEnd(builder, clean_block);
410 
411       lp_build_coro_free_mem(gallivm, coro_id, coro_hdl);
412 
413       LLVMBuildBr(builder, sus_block);
414       LLVMPositionBuilderAtEnd(builder, sus_block);
415 
416       lp_build_coro_end(gallivm, coro_hdl);
417       LLVMBuildRet(builder, coro_hdl);
418    }
419 
420    sampler->destroy(sampler);
421    image->destroy(image);
422 
423    gallivm_verify_function(gallivm, coro);
424    gallivm_verify_function(gallivm, function);
425 }
426 
427 static void *
llvmpipe_create_compute_state(struct pipe_context * pipe,const struct pipe_compute_state * templ)428 llvmpipe_create_compute_state(struct pipe_context *pipe,
429                                      const struct pipe_compute_state *templ)
430 {
431    struct lp_compute_shader *shader;
432    int nr_samplers, nr_sampler_views;
433 
434    shader = CALLOC_STRUCT(lp_compute_shader);
435    if (!shader)
436       return NULL;
437 
438    shader->no = cs_no++;
439 
440    shader->base.type = templ->ir_type;
441    shader->req_local_mem = templ->req_local_mem;
442    if (templ->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
443       struct blob_reader reader;
444       const struct pipe_binary_program_header *hdr = templ->prog;
445 
446       blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
447       shader->base.ir.nir = nir_deserialize(NULL, pipe->screen->get_compiler_options(pipe->screen, PIPE_SHADER_IR_NIR, PIPE_SHADER_COMPUTE), &reader);
448       shader->base.type = PIPE_SHADER_IR_NIR;
449 
450       pipe->screen->finalize_nir(pipe->screen, shader->base.ir.nir, false);
451       shader->req_local_mem += ((struct nir_shader *)shader->base.ir.nir)->info.cs.shared_size;
452    } else if (templ->ir_type == PIPE_SHADER_IR_NIR)
453       shader->base.ir.nir = (struct nir_shader *)templ->prog;
454 
455    if (shader->base.type == PIPE_SHADER_IR_TGSI) {
456       /* get/save the summary info for this shader */
457       lp_build_tgsi_info(templ->prog, &shader->info);
458 
459       /* we need to keep a local copy of the tokens */
460       shader->base.tokens = tgsi_dup_tokens(templ->prog);
461    } else {
462       nir_tgsi_scan_shader(shader->base.ir.nir, &shader->info.base, false);
463    }
464 
465    make_empty_list(&shader->variants);
466 
467    nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
468    nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
469    int nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
470    shader->variant_key_size = lp_cs_variant_key_size(MAX2(nr_samplers, nr_sampler_views), nr_images);
471 
472    return shader;
473 }
474 
475 static void
llvmpipe_bind_compute_state(struct pipe_context * pipe,void * cs)476 llvmpipe_bind_compute_state(struct pipe_context *pipe,
477                             void *cs)
478 {
479    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
480 
481    if (llvmpipe->cs == cs)
482       return;
483 
484    llvmpipe->cs = (struct lp_compute_shader *)cs;
485    llvmpipe->cs_dirty |= LP_CSNEW_CS;
486 }
487 
488 /**
489  * Remove shader variant from two lists: the shader's variant list
490  * and the context's variant list.
491  */
492 static void
llvmpipe_remove_cs_shader_variant(struct llvmpipe_context * lp,struct lp_compute_shader_variant * variant)493 llvmpipe_remove_cs_shader_variant(struct llvmpipe_context *lp,
494                                   struct lp_compute_shader_variant *variant)
495 {
496    if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
497       debug_printf("llvmpipe: del cs #%u var %u v created %u v cached %u "
498                    "v total cached %u inst %u total inst %u\n",
499                    variant->shader->no, variant->no,
500                    variant->shader->variants_created,
501                    variant->shader->variants_cached,
502                    lp->nr_cs_variants, variant->nr_instrs, lp->nr_cs_instrs);
503    }
504 
505    gallivm_destroy(variant->gallivm);
506 
507    /* remove from shader's list */
508    remove_from_list(&variant->list_item_local);
509    variant->shader->variants_cached--;
510 
511    /* remove from context's list */
512    remove_from_list(&variant->list_item_global);
513    lp->nr_cs_variants--;
514    lp->nr_cs_instrs -= variant->nr_instrs;
515 
516    FREE(variant);
517 }
518 
519 static void
llvmpipe_delete_compute_state(struct pipe_context * pipe,void * cs)520 llvmpipe_delete_compute_state(struct pipe_context *pipe,
521                               void *cs)
522 {
523    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
524    struct lp_compute_shader *shader = cs;
525    struct lp_cs_variant_list_item *li;
526 
527    if (llvmpipe->cs == cs)
528       llvmpipe->cs = NULL;
529    for (unsigned i = 0; i < shader->max_global_buffers; i++)
530       pipe_resource_reference(&shader->global_buffers[i], NULL);
531    FREE(shader->global_buffers);
532 
533    /* Delete all the variants */
534    li = first_elem(&shader->variants);
535    while(!at_end(&shader->variants, li)) {
536       struct lp_cs_variant_list_item *next = next_elem(li);
537       llvmpipe_remove_cs_shader_variant(llvmpipe, li->base);
538       li = next;
539    }
540    if (shader->base.ir.nir)
541       ralloc_free(shader->base.ir.nir);
542    tgsi_free_tokens(shader->base.tokens);
543    FREE(shader);
544 }
545 
546 static struct lp_compute_shader_variant_key *
make_variant_key(struct llvmpipe_context * lp,struct lp_compute_shader * shader,char * store)547 make_variant_key(struct llvmpipe_context *lp,
548                  struct lp_compute_shader *shader,
549                  char *store)
550 {
551    int i;
552    struct lp_compute_shader_variant_key *key;
553    key = (struct lp_compute_shader_variant_key *)store;
554    memset(key, 0, offsetof(struct lp_compute_shader_variant_key, samplers[1]));
555 
556    /* This value will be the same for all the variants of a given shader:
557     */
558    key->nr_samplers = shader->info.base.file_max[TGSI_FILE_SAMPLER] + 1;
559 
560    struct lp_sampler_static_state *cs_sampler;
561 
562    cs_sampler = key->samplers;
563    for(i = 0; i < key->nr_samplers; ++i) {
564       if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
565          lp_sampler_static_sampler_state(&cs_sampler[i].sampler_state,
566                                          lp->samplers[PIPE_SHADER_COMPUTE][i]);
567       }
568    }
569 
570    /*
571     * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
572     * are dx10-style? Can't really have mixed opcodes, at least not
573     * if we want to skip the holes here (without rescanning tgsi).
574     */
575    if (shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] != -1) {
576       key->nr_sampler_views = shader->info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
577       for(i = 0; i < key->nr_sampler_views; ++i) {
578          /*
579           * Note sview may exceed what's representable by file_mask.
580           * This will still work, the only downside is that not actually
581           * used views may be included in the shader key.
582           */
583          if(shader->info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1u << (i & 31))) {
584             lp_sampler_static_texture_state(&cs_sampler[i].texture_state,
585                                             lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
586          }
587       }
588    }
589    else {
590       key->nr_sampler_views = key->nr_samplers;
591       for(i = 0; i < key->nr_sampler_views; ++i) {
592          if(shader->info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
593             lp_sampler_static_texture_state(&cs_sampler[i].texture_state,
594                                             lp->sampler_views[PIPE_SHADER_COMPUTE][i]);
595          }
596       }
597    }
598 
599    struct lp_image_static_state *lp_image;
600    lp_image = lp_cs_variant_key_images(key);
601    key->nr_images = shader->info.base.file_max[TGSI_FILE_IMAGE] + 1;
602    for (i = 0; i < key->nr_images; ++i) {
603       if (shader->info.base.file_mask[TGSI_FILE_IMAGE] & (1 << i)) {
604          lp_sampler_static_texture_state_image(&lp_image[i].image_state,
605                                                &lp->images[PIPE_SHADER_COMPUTE][i]);
606       }
607    }
608    return key;
609 }
610 
611 static void
dump_cs_variant_key(const struct lp_compute_shader_variant_key * key)612 dump_cs_variant_key(const struct lp_compute_shader_variant_key *key)
613 {
614    int i;
615    debug_printf("cs variant %p:\n", (void *) key);
616 
617    for (i = 0; i < key->nr_samplers; ++i) {
618       const struct lp_static_sampler_state *sampler = &key->samplers[i].sampler_state;
619       debug_printf("sampler[%u] = \n", i);
620       debug_printf("  .wrap = %s %s %s\n",
621                    util_str_tex_wrap(sampler->wrap_s, TRUE),
622                    util_str_tex_wrap(sampler->wrap_t, TRUE),
623                    util_str_tex_wrap(sampler->wrap_r, TRUE));
624       debug_printf("  .min_img_filter = %s\n",
625                    util_str_tex_filter(sampler->min_img_filter, TRUE));
626       debug_printf("  .min_mip_filter = %s\n",
627                    util_str_tex_mipfilter(sampler->min_mip_filter, TRUE));
628       debug_printf("  .mag_img_filter = %s\n",
629                    util_str_tex_filter(sampler->mag_img_filter, TRUE));
630       if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE)
631          debug_printf("  .compare_func = %s\n", util_str_func(sampler->compare_func, TRUE));
632       debug_printf("  .normalized_coords = %u\n", sampler->normalized_coords);
633       debug_printf("  .min_max_lod_equal = %u\n", sampler->min_max_lod_equal);
634       debug_printf("  .lod_bias_non_zero = %u\n", sampler->lod_bias_non_zero);
635       debug_printf("  .apply_min_lod = %u\n", sampler->apply_min_lod);
636       debug_printf("  .apply_max_lod = %u\n", sampler->apply_max_lod);
637    }
638    for (i = 0; i < key->nr_sampler_views; ++i) {
639       const struct lp_static_texture_state *texture = &key->samplers[i].texture_state;
640       debug_printf("texture[%u] = \n", i);
641       debug_printf("  .format = %s\n",
642                    util_format_name(texture->format));
643       debug_printf("  .target = %s\n",
644                    util_str_tex_target(texture->target, TRUE));
645       debug_printf("  .level_zero_only = %u\n",
646                    texture->level_zero_only);
647       debug_printf("  .pot = %u %u %u\n",
648                    texture->pot_width,
649                    texture->pot_height,
650                    texture->pot_depth);
651    }
652    struct lp_image_static_state *images = lp_cs_variant_key_images(key);
653    for (i = 0; i < key->nr_images; ++i) {
654       const struct lp_static_texture_state *image = &images[i].image_state;
655       debug_printf("image[%u] = \n", i);
656       debug_printf("  .format = %s\n",
657                    util_format_name(image->format));
658       debug_printf("  .target = %s\n",
659                    util_str_tex_target(image->target, TRUE));
660       debug_printf("  .level_zero_only = %u\n",
661                    image->level_zero_only);
662       debug_printf("  .pot = %u %u %u\n",
663                    image->pot_width,
664                    image->pot_height,
665                    image->pot_depth);
666    }
667 }
668 
669 static void
lp_debug_cs_variant(const struct lp_compute_shader_variant * variant)670 lp_debug_cs_variant(const struct lp_compute_shader_variant *variant)
671 {
672    debug_printf("llvmpipe: Compute shader #%u variant #%u:\n",
673                 variant->shader->no, variant->no);
674    if (variant->shader->base.type == PIPE_SHADER_IR_TGSI)
675       tgsi_dump(variant->shader->base.tokens, 0);
676    else
677       nir_print_shader(variant->shader->base.ir.nir, stderr);
678    dump_cs_variant_key(&variant->key);
679    debug_printf("\n");
680 }
681 
682 static void
lp_cs_get_ir_cache_key(struct lp_compute_shader_variant * variant,unsigned char ir_sha1_cache_key[20])683 lp_cs_get_ir_cache_key(struct lp_compute_shader_variant *variant,
684                        unsigned char ir_sha1_cache_key[20])
685 {
686    struct blob blob = { 0 };
687    unsigned ir_size;
688    void *ir_binary;
689 
690    blob_init(&blob);
691    nir_serialize(&blob, variant->shader->base.ir.nir, true);
692    ir_binary = blob.data;
693    ir_size = blob.size;
694 
695    struct mesa_sha1 ctx;
696    _mesa_sha1_init(&ctx);
697    _mesa_sha1_update(&ctx, &variant->key, variant->shader->variant_key_size);
698    _mesa_sha1_update(&ctx, ir_binary, ir_size);
699    _mesa_sha1_final(&ctx, ir_sha1_cache_key);
700 
701    blob_finish(&blob);
702 }
703 
/**
 * Build a new compute shader variant for the given key: allocate the
 * variant, probe the disk cache (NIR shaders only), JIT-compile the
 * gallivm module, and insert the fresh binary into the disk cache if it
 * wasn't found there.
 *
 * Returns NULL on allocation or gallivm-creation failure.  The caller
 * owns the returned variant (see llvmpipe_update_cs for list insertion).
 */
static struct lp_compute_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_compute_shader *shader,
                 const struct lp_compute_shader_variant_key *key)
{
   struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
   struct lp_compute_shader_variant *variant;
   char module_name[64];
   unsigned char ir_sha1_cache_key[20];
   struct lp_cached_code cached = { 0 };
   bool needs_caching = false;
   /* The key is variable-sized: allocate extra trailing storage beyond
    * variant->key for the full variant_key_size.
    */
   variant = MALLOC(sizeof *variant + shader->variant_key_size - sizeof variant->key);
   if (!variant)
      return NULL;

   memset(variant, 0, sizeof(*variant));
   snprintf(module_name, sizeof(module_name), "cs%u_variant%u",
            shader->no, shader->variants_created);

   variant->shader = shader;
   memcpy(&variant->key, key, shader->variant_key_size);

   if (shader->base.ir.nir) {
      /* NIR path: look for a previously compiled binary in the disk
       * cache; if absent, remember to insert it after compilation.
       */
      lp_cs_get_ir_cache_key(variant, ir_sha1_cache_key);

      lp_disk_cache_find_shader(screen, &cached, ir_sha1_cache_key);
      if (!cached.data_size)
         needs_caching = true;
   }
   variant->gallivm = gallivm_create(module_name, lp->context, &cached);
   if (!variant->gallivm) {
      FREE(variant);
      return NULL;
   }

   variant->list_item_global.base = variant;
   variant->list_item_local.base = variant;
   variant->no = shader->variants_created++;



   if ((LP_DEBUG & DEBUG_CS) || (gallivm_debug & GALLIVM_DEBUG_IR)) {
      lp_debug_cs_variant(variant);
   }

   lp_jit_init_cs_types(variant);

   /* Emit LLVM IR for this variant, then compile the whole module. */
   generate_compute(lp, shader, variant);

   gallivm_compile_module(variant->gallivm);

   lp_build_coro_add_malloc_hooks(variant->gallivm);
   variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);

   variant->jit_function = (lp_jit_cs_func)gallivm_jit_function(variant->gallivm, variant->function);

   if (needs_caching) {
      lp_disk_cache_insert_shader(screen, &cached, ir_sha1_cache_key);
   }
   /* The LLVM IR is no longer needed once the JIT function exists. */
   gallivm_free_ir(variant->gallivm);
   return variant;
}
766 
767 static void
lp_cs_ctx_set_cs_variant(struct lp_cs_context * csctx,struct lp_compute_shader_variant * variant)768 lp_cs_ctx_set_cs_variant( struct lp_cs_context *csctx,
769                           struct lp_compute_shader_variant *variant)
770 {
771    csctx->cs.current.variant = variant;
772 }
773 
/**
 * Find (or build) the compute shader variant matching the current state
 * key and bind it to the CS context.  Maintains the per-context LRU list
 * and evicts least-recently-used variants when the variant count or
 * total instruction budget is exceeded.
 */
static void
llvmpipe_update_cs(struct llvmpipe_context *lp)
{
   struct lp_compute_shader *shader = lp->cs;

   struct lp_compute_shader_variant_key *key;
   struct lp_compute_shader_variant *variant = NULL;
   struct lp_cs_variant_list_item *li;
   /* Stack storage for the variable-sized key built by make_variant_key. */
   char store[LP_CS_MAX_VARIANT_KEY_SIZE];

   key = make_variant_key(lp, shader, store);

   /* Search the variants for one which matches the key */
   li = first_elem(&shader->variants);
   while(!at_end(&shader->variants, li)) {
      if(memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      /* Move this variant to the head of the list to implement LRU
       * deletion of shaders when we have too many.
       */
      move_to_head(&lp->cs_variants_list, &variant->list_item_global);
   }
   else {
      /* variant not found, create it now */
      int64_t t0, t1, dt;
      unsigned i;
      unsigned variants_to_cull;

      if (LP_DEBUG & DEBUG_CS) {
         debug_printf("%u variants,\t%u instrs,\t%u instrs/variant\n",
                      lp->nr_cs_variants,
                      lp->nr_cs_instrs,
                      lp->nr_cs_variants ? lp->nr_cs_instrs / lp->nr_cs_variants : 0);
      }

      /* First, check if we've exceeded the max number of shader variants.
       * If so, free 6.25% of them (the least recently used ones).
       */
      variants_to_cull = lp->nr_cs_variants >= LP_MAX_SHADER_VARIANTS ? LP_MAX_SHADER_VARIANTS / 16 : 0;

      if (variants_to_cull ||
          lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS) {
         if (gallivm_debug & GALLIVM_DEBUG_PERF) {
            debug_printf("Evicting CS: %u cs variants,\t%u total variants,"
                         "\t%u instrs,\t%u instrs/variant\n",
                         shader->variants_cached,
                         lp->nr_cs_variants, lp->nr_cs_instrs,
                         lp->nr_cs_instrs / lp->nr_cs_variants);
         }

         /*
          * We need to re-check lp->nr_cs_instrs because an arbitrarily large
          * number of shader variants (potentially all of them) could be
          * pending for destruction on flush.
          */

         for (i = 0; i < variants_to_cull || lp->nr_cs_instrs >= LP_MAX_SHADER_INSTRUCTIONS; i++) {
            struct lp_cs_variant_list_item *item;
            if (is_empty_list(&lp->cs_variants_list)) {
               break;
            }
            /* Evict from the tail: the least recently used variant. */
            item = last_elem(&lp->cs_variants_list);
            assert(item);
            assert(item->base);
            llvmpipe_remove_cs_shader_variant(lp, item->base);
         }
      }
      /*
       * Generate the new variant.
       */
      t0 = os_time_get();
      variant = generate_variant(lp, shader, key);
      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */

      /* Put the new variant into the list */
      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->cs_variants_list, &variant->list_item_global);
         lp->nr_cs_variants++;
         lp->nr_cs_instrs += variant->nr_instrs;
         shader->variants_cached++;
      }
   }
   /* Bind this variant */
   lp_cs_ctx_set_cs_variant(lp->csctx, variant);
}
869 
870 /**
871  * Called during state validation when LP_CSNEW_SAMPLER_VIEW is set.
872  */
static void
lp_csctx_set_sampler_views(struct lp_cs_context *csctx,
                           unsigned num,
                           struct pipe_sampler_view **views)
{
   unsigned i, max_tex_num;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   /* Iterate up to the larger of the new and previous counts so stale
    * slots beyond `num` get their references dropped below.
    */
   max_tex_num = MAX2(num, csctx->cs.current_tex_num);

   for (i = 0; i < max_tex_num; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &csctx->cs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&csctx->cs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - csctx array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
              jit_tex->base = lp_tex->data;
            }
            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
               jit_tex->num_samples = 0;
               jit_tex->sample_stride = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;
               jit_tex->num_samples = res->nr_samples;
               jit_tex->sample_stride = 0;

               if (llvmpipe_resource_is_texture(res)) {
                  /* Copy per-mip offsets and strides for the view's
                   * level range.
                   */
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }
                  jit_tex->sample_stride = lp_tex->sample_stride;

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures, we don't have first_layer, instead
                      * adjust last_layer (stored as depth) plus the mip level offsets
                      * (as we have mip-first layout can't just adjust base ptr).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have "offset", instead adjust
                   * the size (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.size / view_blocksize;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.offset + view->u.buf.size <= res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                         PIPE_MAP_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            jit_tex->num_samples = res->nr_samples;
            jit_tex->sample_stride = 0;
            assert(jit_tex->base);
         }
      }
      else {
         /* Slot unbound (or beyond the new count): drop the reference. */
         pipe_resource_reference(&csctx->cs.current_tex[i], NULL);
      }
   }
   csctx->cs.current_tex_num = num;
}
1016 
1017 
1018 /**
1019  * Called during state validation when LP_NEW_SAMPLER is set.
1020  */
1021 static void
lp_csctx_set_sampler_state(struct lp_cs_context * csctx,unsigned num,struct pipe_sampler_state ** samplers)1022 lp_csctx_set_sampler_state(struct lp_cs_context *csctx,
1023                            unsigned num,
1024                            struct pipe_sampler_state **samplers)
1025 {
1026    unsigned i;
1027 
1028    LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);
1029 
1030    assert(num <= PIPE_MAX_SAMPLERS);
1031 
1032    for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
1033       const struct pipe_sampler_state *sampler = i < num ? samplers[i] : NULL;
1034 
1035       if (sampler) {
1036          struct lp_jit_sampler *jit_sam;
1037          jit_sam = &csctx->cs.current.jit_context.samplers[i];
1038 
1039          jit_sam->min_lod = sampler->min_lod;
1040          jit_sam->max_lod = sampler->max_lod;
1041          jit_sam->lod_bias = sampler->lod_bias;
1042          COPY_4V(jit_sam->border_color, sampler->border_color.f);
1043       }
1044    }
1045 }
1046 
1047 static void
lp_csctx_set_cs_constants(struct lp_cs_context * csctx,unsigned num,struct pipe_constant_buffer * buffers)1048 lp_csctx_set_cs_constants(struct lp_cs_context *csctx,
1049                           unsigned num,
1050                           struct pipe_constant_buffer *buffers)
1051 {
1052    unsigned i;
1053 
1054    LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) buffers);
1055 
1056    assert(num <= ARRAY_SIZE(csctx->constants));
1057 
1058    for (i = 0; i < num; ++i) {
1059       util_copy_constant_buffer(&csctx->constants[i].current, &buffers[i]);
1060    }
1061    for (; i < ARRAY_SIZE(csctx->constants); i++) {
1062       util_copy_constant_buffer(&csctx->constants[i].current, NULL);
1063    }
1064 }
1065 
1066 static void
lp_csctx_set_cs_ssbos(struct lp_cs_context * csctx,unsigned num,struct pipe_shader_buffer * buffers)1067 lp_csctx_set_cs_ssbos(struct lp_cs_context *csctx,
1068                        unsigned num,
1069                        struct pipe_shader_buffer *buffers)
1070 {
1071    int i;
1072    LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *)buffers);
1073 
1074    assert (num <= ARRAY_SIZE(csctx->ssbos));
1075 
1076    for (i = 0; i < num; ++i) {
1077       util_copy_shader_buffer(&csctx->ssbos[i].current, &buffers[i]);
1078    }
1079    for (; i < ARRAY_SIZE(csctx->ssbos); i++) {
1080       util_copy_shader_buffer(&csctx->ssbos[i].current, NULL);
1081    }
1082 }
1083 
/**
 * Latch the given image views into the CS context and fill the matching
 * JIT image descriptors (base pointer, dimensions, strides), applying
 * the view's mip level and layer range.  Slots past `num` are cleared.
 */
static void
lp_csctx_set_cs_images(struct lp_cs_context *csctx,
                       unsigned num,
                       struct pipe_image_view *images)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s %p\n", __FUNCTION__, (void *) images);

   assert(num <= ARRAY_SIZE(csctx->images));

   for (i = 0; i < num; ++i) {
      struct pipe_image_view *image = &images[i];
      util_copy_image_view(&csctx->images[i].current, &images[i]);

      struct pipe_resource *res = image->resource;
      struct llvmpipe_resource *lp_res = llvmpipe_resource(res);
      struct lp_jit_image *jit_image;

      jit_image = &csctx->cs.current.jit_context.images[i];
      /* Empty slot: nothing to point the JIT descriptor at. */
      if (!lp_res)
         continue;
      /* NOTE(review): display-target resources (lp_res->dt != NULL) are
       * skipped here — no CPU mapping is set up for them, unlike in
       * lp_csctx_set_sampler_views.  Confirm that is intentional.
       */
      if (!lp_res->dt) {
         /* regular texture - csctx array of mipmap level offsets */
         if (llvmpipe_resource_is_texture(res)) {
            jit_image->base = lp_res->tex_data;
         } else
            jit_image->base = lp_res->data;

         jit_image->width = res->width0;
         jit_image->height = res->height0;
         jit_image->depth = res->depth0;
         jit_image->num_samples = res->nr_samples;

         if (llvmpipe_resource_is_texture(res)) {
            uint32_t mip_offset = lp_res->mip_offsets[image->u.tex.level];

            /* Shrink width/height to the selected mip level. */
            jit_image->width = u_minify(jit_image->width, image->u.tex.level);
            jit_image->height = u_minify(jit_image->height, image->u.tex.level);

            if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                res->target == PIPE_TEXTURE_2D_ARRAY ||
                res->target == PIPE_TEXTURE_3D ||
                res->target == PIPE_TEXTURE_CUBE ||
                res->target == PIPE_TEXTURE_CUBE_ARRAY) {
               /*
                * For array textures, we don't have first_layer, instead
                * adjust last_layer (stored as depth) plus the mip level offsets
                * (as we have mip-first layout can't just adjust base ptr).
                * XXX For mip levels, could do something similar.
                */
               jit_image->depth = image->u.tex.last_layer - image->u.tex.first_layer + 1;
               mip_offset += image->u.tex.first_layer * lp_res->img_stride[image->u.tex.level];
            } else
               jit_image->depth = u_minify(jit_image->depth, image->u.tex.level);

            jit_image->row_stride = lp_res->row_stride[image->u.tex.level];
            jit_image->img_stride = lp_res->img_stride[image->u.tex.level];
            jit_image->sample_stride = lp_res->sample_stride;
            jit_image->base = (uint8_t *)jit_image->base + mip_offset;
         } else {
            /* Buffer image: width is the element count, base is advanced
             * by the view's byte offset.
             */
            unsigned view_blocksize = util_format_get_blocksize(image->format);
            jit_image->width = image->u.buf.size / view_blocksize;
            jit_image->base = (uint8_t *)jit_image->base + image->u.buf.offset;
         }
      }
   }
   /* Clear all remaining slots. */
   for (; i < ARRAY_SIZE(csctx->images); i++) {
      util_copy_image_view(&csctx->images[i].current, NULL);
   }
}
1155 
1156 static void
update_csctx_consts(struct llvmpipe_context * llvmpipe)1157 update_csctx_consts(struct llvmpipe_context *llvmpipe)
1158 {
1159    struct lp_cs_context *csctx = llvmpipe->csctx;
1160    int i;
1161 
1162    for (i = 0; i < ARRAY_SIZE(csctx->constants); ++i) {
1163       struct pipe_resource *buffer = csctx->constants[i].current.buffer;
1164       const ubyte *current_data = NULL;
1165       unsigned current_size = csctx->constants[i].current.buffer_size;
1166       if (buffer) {
1167          /* resource buffer */
1168          current_data = (ubyte *) llvmpipe_resource_data(buffer);
1169       }
1170       else if (csctx->constants[i].current.user_buffer) {
1171          /* user-space buffer */
1172          current_data = (ubyte *) csctx->constants[i].current.user_buffer;
1173       }
1174 
1175       if (current_data && current_size >= sizeof(float)) {
1176          current_data += csctx->constants[i].current.buffer_offset;
1177          csctx->cs.current.jit_context.constants[i] = (const float *)current_data;
1178          csctx->cs.current.jit_context.num_constants[i] =
1179             DIV_ROUND_UP(csctx->constants[i].current.buffer_size,
1180                          lp_get_constant_buffer_stride(llvmpipe->pipe.screen));
1181       } else {
1182          static const float fake_const_buf[4];
1183          csctx->cs.current.jit_context.constants[i] = fake_const_buf;
1184          csctx->cs.current.jit_context.num_constants[i] = 0;
1185       }
1186    }
1187 }
1188 
1189 static void
update_csctx_ssbo(struct llvmpipe_context * llvmpipe)1190 update_csctx_ssbo(struct llvmpipe_context *llvmpipe)
1191 {
1192    struct lp_cs_context *csctx = llvmpipe->csctx;
1193    int i;
1194    for (i = 0; i < ARRAY_SIZE(csctx->ssbos); ++i) {
1195       struct pipe_resource *buffer = csctx->ssbos[i].current.buffer;
1196       const ubyte *current_data = NULL;
1197 
1198       if (!buffer)
1199          continue;
1200       /* resource buffer */
1201       current_data = (ubyte *) llvmpipe_resource_data(buffer);
1202       if (current_data) {
1203          current_data += csctx->ssbos[i].current.buffer_offset;
1204 
1205          csctx->cs.current.jit_context.ssbos[i] = (const uint32_t *)current_data;
1206          csctx->cs.current.jit_context.num_ssbos[i] = csctx->ssbos[i].current.buffer_size;
1207       } else {
1208          csctx->cs.current.jit_context.ssbos[i] = NULL;
1209          csctx->cs.current.jit_context.num_ssbos[i] = 0;
1210       }
1211    }
1212 }
1213 
/**
 * Re-validate all compute-derived state for the dirty bits set in
 * llvmpipe->cs_dirty, then clear them.  State latching happens before
 * llvmpipe_update_cs() so the variant key is built from fresh state.
 *
 * \param input  optional kernel-argument blob; stored in the CS context
 *               and exposed to the JIT as kernel_args when non-NULL.
 */
static void
llvmpipe_cs_update_derived(struct llvmpipe_context *llvmpipe, void *input)
{
   if (llvmpipe->cs_dirty & LP_CSNEW_CONSTANTS) {
      lp_csctx_set_cs_constants(llvmpipe->csctx,
                                ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_COMPUTE]),
                                llvmpipe->constants[PIPE_SHADER_COMPUTE]);
      update_csctx_consts(llvmpipe);
   }

   if (llvmpipe->cs_dirty & LP_CSNEW_SSBOS) {
      lp_csctx_set_cs_ssbos(llvmpipe->csctx,
                            ARRAY_SIZE(llvmpipe->ssbos[PIPE_SHADER_COMPUTE]),
                            llvmpipe->ssbos[PIPE_SHADER_COMPUTE]);
      update_csctx_ssbo(llvmpipe);
   }

   if (llvmpipe->cs_dirty & LP_CSNEW_SAMPLER_VIEW)
      lp_csctx_set_sampler_views(llvmpipe->csctx,
                                 llvmpipe->num_sampler_views[PIPE_SHADER_COMPUTE],
                                 llvmpipe->sampler_views[PIPE_SHADER_COMPUTE]);

   if (llvmpipe->cs_dirty & LP_CSNEW_SAMPLER)
      lp_csctx_set_sampler_state(llvmpipe->csctx,
                                 llvmpipe->num_samplers[PIPE_SHADER_COMPUTE],
                                 llvmpipe->samplers[PIPE_SHADER_COMPUTE]);

   if (llvmpipe->cs_dirty & LP_CSNEW_IMAGES)
      lp_csctx_set_cs_images(llvmpipe->csctx,
                              ARRAY_SIZE(llvmpipe->images[PIPE_SHADER_COMPUTE]),
                              llvmpipe->images[PIPE_SHADER_COMPUTE]);

   if (input) {
      struct lp_cs_context *csctx = llvmpipe->csctx;
      csctx->input = input;
      csctx->cs.current.jit_context.kernel_args = input;
   }

   /* Any of these bits can change the variant key, so re-select (or
    * build) the shader variant last.
    */
   if (llvmpipe->cs_dirty & (LP_CSNEW_CS |
                             LP_CSNEW_IMAGES |
                             LP_CSNEW_SAMPLER_VIEW |
                             LP_CSNEW_SAMPLER))
      llvmpipe_update_cs(llvmpipe);


   llvmpipe->cs_dirty = 0;
}
1261 
1262 static void
cs_exec_fn(void * init_data,int iter_idx,struct lp_cs_local_mem * lmem)1263 cs_exec_fn(void *init_data, int iter_idx, struct lp_cs_local_mem *lmem)
1264 {
1265    struct lp_cs_job_info *job_info = init_data;
1266    struct lp_jit_cs_thread_data thread_data;
1267 
1268    memset(&thread_data, 0, sizeof(thread_data));
1269 
1270    if (lmem->local_size < job_info->req_local_mem) {
1271       lmem->local_mem_ptr = REALLOC(lmem->local_mem_ptr, lmem->local_size,
1272                                     job_info->req_local_mem);
1273       lmem->local_size = job_info->req_local_mem;
1274    }
1275    thread_data.shared = lmem->local_mem_ptr;
1276 
1277    unsigned grid_z = iter_idx / (job_info->grid_size[0] * job_info->grid_size[1]);
1278    unsigned grid_y = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1]))) / job_info->grid_size[0];
1279    unsigned grid_x = (iter_idx - (grid_z * (job_info->grid_size[0] * job_info->grid_size[1])) - (grid_y * job_info->grid_size[0]));
1280    struct lp_compute_shader_variant *variant = job_info->current->variant;
1281    variant->jit_function(&job_info->current->jit_context,
1282                          job_info->block_size[0], job_info->block_size[1], job_info->block_size[2],
1283                          grid_x, grid_y, grid_z,
1284                          job_info->grid_size[0], job_info->grid_size[1], job_info->grid_size[2], job_info->work_dim,
1285                          &thread_data);
1286 }
1287 
1288 static void
fill_grid_size(struct pipe_context * pipe,const struct pipe_grid_info * info,uint32_t grid_size[3])1289 fill_grid_size(struct pipe_context *pipe,
1290                const struct pipe_grid_info *info,
1291                uint32_t grid_size[3])
1292 {
1293    struct pipe_transfer *transfer;
1294    uint32_t *params;
1295    if (!info->indirect) {
1296       grid_size[0] = info->grid[0];
1297       grid_size[1] = info->grid[1];
1298       grid_size[2] = info->grid[2];
1299       return;
1300    }
1301    params = pipe_buffer_map_range(pipe, info->indirect,
1302                                   info->indirect_offset,
1303                                   3 * sizeof(uint32_t),
1304                                   PIPE_MAP_READ,
1305                                   &transfer);
1306 
1307    if (!transfer)
1308       return;
1309 
1310    grid_size[0] = params[0];
1311    grid_size[1] = params[1];
1312    grid_size[2] = params[2];
1313    pipe_buffer_unmap(pipe, transfer);
1314 }
1315 
llvmpipe_launch_grid(struct pipe_context * pipe,const struct pipe_grid_info * info)1316 static void llvmpipe_launch_grid(struct pipe_context *pipe,
1317                                  const struct pipe_grid_info *info)
1318 {
1319    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
1320    struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
1321    struct lp_cs_job_info job_info;
1322 
1323    if (!llvmpipe_check_render_cond(llvmpipe))
1324       return;
1325 
1326    memset(&job_info, 0, sizeof(job_info));
1327 
1328    llvmpipe_cs_update_derived(llvmpipe, info->input);
1329 
1330    fill_grid_size(pipe, info, job_info.grid_size);
1331 
1332    job_info.block_size[0] = info->block[0];
1333    job_info.block_size[1] = info->block[1];
1334    job_info.block_size[2] = info->block[2];
1335    job_info.work_dim = info->work_dim;
1336    job_info.req_local_mem = llvmpipe->cs->req_local_mem;
1337    job_info.current = &llvmpipe->csctx->cs.current;
1338 
1339    int num_tasks = job_info.grid_size[2] * job_info.grid_size[1] * job_info.grid_size[0];
1340    if (num_tasks) {
1341       struct lp_cs_tpool_task *task;
1342       mtx_lock(&screen->cs_mutex);
1343       task = lp_cs_tpool_queue_task(screen->cs_tpool, cs_exec_fn, &job_info, num_tasks);
1344 
1345       lp_cs_tpool_wait_for_task(screen->cs_tpool, &task);
1346       mtx_unlock(&screen->cs_mutex);
1347    }
1348    llvmpipe->pipeline_statistics.cs_invocations += num_tasks * info->block[0] * info->block[1] * info->block[2];
1349 }
1350 
/**
 * pipe_context::set_compute_resources — empty: no state is recorded for
 * these bindings in llvmpipe.
 */
static void
llvmpipe_set_compute_resources(struct pipe_context *pipe,
                               unsigned start, unsigned count,
                               struct pipe_surface **resources)
{


}
1359 
1360 static void
llvmpipe_set_global_binding(struct pipe_context * pipe,unsigned first,unsigned count,struct pipe_resource ** resources,uint32_t ** handles)1361 llvmpipe_set_global_binding(struct pipe_context *pipe,
1362                             unsigned first, unsigned count,
1363                             struct pipe_resource **resources,
1364                             uint32_t **handles)
1365 {
1366    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
1367    struct lp_compute_shader *cs = llvmpipe->cs;
1368    unsigned i;
1369 
1370    if (first + count > cs->max_global_buffers) {
1371       unsigned old_max = cs->max_global_buffers;
1372       cs->max_global_buffers = first + count;
1373       cs->global_buffers = realloc(cs->global_buffers,
1374                                    cs->max_global_buffers * sizeof(cs->global_buffers[0]));
1375       if (!cs->global_buffers) {
1376          return;
1377       }
1378 
1379       memset(&cs->global_buffers[old_max], 0, (cs->max_global_buffers - old_max) * sizeof(cs->global_buffers[0]));
1380    }
1381 
1382    if (!resources) {
1383       for (i = 0; i < count; i++)
1384          pipe_resource_reference(&cs->global_buffers[first + i], NULL);
1385       return;
1386    }
1387 
1388    for (i = 0; i < count; i++) {
1389       uintptr_t va;
1390       uint32_t offset;
1391       pipe_resource_reference(&cs->global_buffers[first + i], resources[i]);
1392       struct llvmpipe_resource *lp_res = llvmpipe_resource(resources[i]);
1393       offset = *handles[i];
1394       va = (uintptr_t)((char *)lp_res->data + offset);
1395       memcpy(handles[i], &va, sizeof(va));
1396    }
1397 }
1398 
1399 void
llvmpipe_init_compute_funcs(struct llvmpipe_context * llvmpipe)1400 llvmpipe_init_compute_funcs(struct llvmpipe_context *llvmpipe)
1401 {
1402    llvmpipe->pipe.create_compute_state = llvmpipe_create_compute_state;
1403    llvmpipe->pipe.bind_compute_state = llvmpipe_bind_compute_state;
1404    llvmpipe->pipe.delete_compute_state = llvmpipe_delete_compute_state;
1405    llvmpipe->pipe.set_compute_resources = llvmpipe_set_compute_resources;
1406    llvmpipe->pipe.set_global_binding = llvmpipe_set_global_binding;
1407    llvmpipe->pipe.launch_grid = llvmpipe_launch_grid;
1408 }
1409 
1410 void
lp_csctx_destroy(struct lp_cs_context * csctx)1411 lp_csctx_destroy(struct lp_cs_context *csctx)
1412 {
1413    unsigned i;
1414    for (i = 0; i < ARRAY_SIZE(csctx->cs.current_tex); i++) {
1415       pipe_resource_reference(&csctx->cs.current_tex[i], NULL);
1416    }
1417    for (i = 0; i < ARRAY_SIZE(csctx->constants); i++) {
1418       pipe_resource_reference(&csctx->constants[i].current.buffer, NULL);
1419    }
1420    for (i = 0; i < ARRAY_SIZE(csctx->ssbos); i++) {
1421       pipe_resource_reference(&csctx->ssbos[i].current.buffer, NULL);
1422    }
1423    FREE(csctx);
1424 }
1425 
lp_csctx_create(struct pipe_context * pipe)1426 struct lp_cs_context *lp_csctx_create(struct pipe_context *pipe)
1427 {
1428    struct lp_cs_context *csctx;
1429 
1430    csctx = CALLOC_STRUCT(lp_cs_context);
1431    if (!csctx)
1432       return NULL;
1433 
1434    csctx->pipe = pipe;
1435    return csctx;
1436 }
1437