//
// Copyright 2019 Karol Herbst
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//

#include "invocation.hpp"

#include <tuple>

#include "core/device.hpp"
#include "core/error.hpp"
#include "core/binary.hpp"
#include "pipe/p_state.h"
#include "util/algorithm.hpp"
#include "util/functional.hpp"

#include <compiler/glsl_types.h>
#include <compiler/nir/nir_builder.h>
#include <compiler/nir/nir_serialize.h>
#include <compiler/spirv/nir_spirv.h>
#include <util/u_math.h>

using namespace clover;

#ifdef HAVE_CLOVER_SPIRV

// Refs and unrefs the glsl_type_singleton.
static class glsl_type_ref {
public:
   glsl_type_ref() {
      glsl_type_singleton_init_or_ref();
   }

   ~glsl_type_ref() {
      glsl_type_singleton_decref();
   }
} glsl_type_ref;

static const nir_shader_compiler_options *
dev_get_nir_compiler_options(const device &dev)
{
   const void *co = dev.get_compiler_options(PIPE_SHADER_IR_NIR);
   return static_cast<const nir_shader_compiler_options*>(co);
}

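// Callback handed to spirv_to_nir() through the debug options so that
// SPIR-V translation messages are appended to the build log passed in as
// private_data.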
static void debug_function(void *private_data,
                           enum nir_spirv_debug_level level, size_t spirv_offset,
                           const char *message)
{
   assert(private_data);
   auto r_log = reinterpret_cast<std::string *>(private_data);
   *r_log += message;
}

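// Size/alignment callback used when laying out the kernel input uniforms:
// samplers and images occupy no space in the argument buffer (they are bound
// through separate uniforms), everything else uses its CL size/alignment.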
static void
clover_arg_size_align(const glsl_type *type, unsigned *size, unsigned *align)
{
   if (type == glsl_type::sampler_type || type->is_image()) {
      *size = 0;
      *align = 1;
   } else {
      *size = type->cl_size();
      *align = type->cl_alignment();
   }
}

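// Adds one cl_mem-sized uniform per image variable so that image arguments
// still take up space in the kernel input buffer (see the comment below).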
static void
clover_nir_add_image_uniforms(nir_shader *shader)
{
   /* Clover expects each image variable to take up a cl_mem worth of space in
    * the arguments data. Add uniforms as needed to match this expectation.
    */
   nir_foreach_image_variable_safe(var, shader) {
      nir_variable *uniform = rzalloc(shader, nir_variable);
      uniform->name = ralloc_strdup(uniform, var->name);
      uniform->type = glsl_uintN_t_type(sizeof(cl_mem) * 8);
      uniform->data.mode = nir_var_uniform;
      uniform->data.read_only = true;
      uniform->data.location = var->data.location;

      exec_node_insert_node_before(&var->node, &uniform->node);
   }
}

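// Assigns driver_locations to image and sampler variables, fills in the
// texture/image/sampler usage info, and rewrites derefs of those variables
// (in tex instructions and image intrinsics) into plain indices/offsets.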
static bool
clover_nir_lower_images(nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   ASSERTED int last_loc = -1;
   int num_rd_images = 0, num_wr_images = 0;
   nir_foreach_image_variable(var, shader) {
      /* Assume they come in order */
      assert(var->data.location > last_loc);
      last_loc = var->data.location;

      if (var->data.access & ACCESS_NON_WRITEABLE)
         var->data.driver_location = num_rd_images++;
      else
         var->data.driver_location = num_wr_images++;
   }
   shader->info.num_textures = num_rd_images;
   BITSET_ZERO(shader->info.textures_used);
   if (num_rd_images)
      BITSET_SET_RANGE(shader->info.textures_used, 0, num_rd_images - 1);

   BITSET_ZERO(shader->info.images_used);
   if (num_wr_images)
      BITSET_SET_RANGE(shader->info.images_used, 0, num_wr_images - 1);
   shader->info.num_images = num_wr_images;

   last_loc = -1;
   int num_samplers = 0;
   nir_foreach_uniform_variable(var, shader) {
      if (var->type == glsl_bare_sampler_type()) {
         /* Assume they come in order */
         assert(var->data.location > last_loc);
         last_loc = var->data.location;

         /* TODO: Constant samplers */
         var->data.driver_location = num_samplers++;
      } else {
         /* CL shouldn't have any sampled images */
         assert(!glsl_type_is_sampler(var->type));
      }
   }
   BITSET_ZERO(shader->info.samplers_used);
   if (num_samplers)
      BITSET_SET_RANGE(shader->info.samplers_used, 0, num_samplers - 1);

   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->deref_type != nir_deref_type_var)
               break;

            if (!glsl_type_is_image(deref->type) &&
                !glsl_type_is_sampler(deref->type))
               break;

            b.cursor = nir_instr_remove(&deref->instr);
            nir_ssa_def *loc =
               nir_imm_intN_t(&b, deref->var->data.driver_location,
                              deref->dest.ssa.bit_size);
            nir_ssa_def_rewrite_uses(&deref->dest.ssa, loc);
            progress = true;
            break;
         }

         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            unsigned count = 0;
            for (unsigned i = 0; i < tex->num_srcs; i++) {
               if (tex->src[i].src_type == nir_tex_src_texture_deref ||
                   tex->src[i].src_type == nir_tex_src_sampler_deref) {
                  nir_deref_instr *deref = nir_src_as_deref(tex->src[i].src);
                  if (deref->deref_type == nir_deref_type_var) {
                     /* In this case, we know the actual variable */
                     if (tex->src[i].src_type == nir_tex_src_texture_deref)
                        tex->texture_index = deref->var->data.driver_location;
                     else
                        tex->sampler_index = deref->var->data.driver_location;
                     /* This source gets discarded */
                     nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
                                           NIR_SRC_INIT);
                     continue;
                  } else {
                     assert(tex->src[i].src.is_ssa);
                     b.cursor = nir_before_instr(&tex->instr);
                     /* Back-ends expect a 32-bit thing, not 64-bit */
                     nir_ssa_def *offset = nir_u2u32(&b, tex->src[i].src.ssa);
                     if (tex->src[i].src_type == nir_tex_src_texture_deref)
                        tex->src[count].src_type = nir_tex_src_texture_offset;
                     else
                        tex->src[count].src_type = nir_tex_src_sampler_offset;
                     nir_instr_rewrite_src(&tex->instr, &tex->src[count].src,
                                           nir_src_for_ssa(offset));
                  }
               } else {
                  /* If we've removed a source, move this one down */
                  if (count != i) {
                     assert(count < i);
                     tex->src[count].src_type = tex->src[i].src_type;
                     nir_instr_move_src(&tex->instr, &tex->src[count].src,
                                        &tex->src[i].src);
                  }
               }
               count++;
            }
            tex->num_srcs = count;
            progress = true;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_image_deref_load:
            case nir_intrinsic_image_deref_store:
            case nir_intrinsic_image_deref_atomic_add:
            case nir_intrinsic_image_deref_atomic_imin:
            case nir_intrinsic_image_deref_atomic_umin:
            case nir_intrinsic_image_deref_atomic_imax:
            case nir_intrinsic_image_deref_atomic_umax:
            case nir_intrinsic_image_deref_atomic_and:
            case nir_intrinsic_image_deref_atomic_or:
            case nir_intrinsic_image_deref_atomic_xor:
            case nir_intrinsic_image_deref_atomic_exchange:
            case nir_intrinsic_image_deref_atomic_comp_swap:
            case nir_intrinsic_image_deref_atomic_fadd:
            case nir_intrinsic_image_deref_atomic_inc_wrap:
            case nir_intrinsic_image_deref_atomic_dec_wrap:
            case nir_intrinsic_image_deref_size:
            case nir_intrinsic_image_deref_samples: {
               assert(intrin->src[0].is_ssa);
               b.cursor = nir_before_instr(&intrin->instr);
               /* Back-ends expect a 32-bit thing, not 64-bit */
               nir_ssa_def *offset = nir_u2u32(&b, intrin->src[0].ssa);
               nir_rewrite_image_intrinsic(intrin, offset, false);
               progress = true;
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

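// State shared across clover_lower_nir_instr() invocations: the kernel's
// argument list (extended with hidden arguments as they are needed), the
// number of grid dimensions, and the lazily created variables that back the
// hidden arguments.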
struct clover_lower_nir_state {
   std::vector<binary::argument> &args;
   uint32_t global_dims;
   nir_variable *constant_var;
   nir_variable *printf_buffer;
   nir_variable *offset_vars[3];
};

static bool
clover_lower_nir_filter(const nir_instr *instr, const void *)
{
   return instr->type == nir_instr_type_intrinsic;
}

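// Lowers clover-specific intrinsics: the printf buffer address, the base
// global invocation ID and the constant buffer base pointer are each turned
// into loads from hidden uniforms that clover binds as extra kernel
// arguments.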
static nir_ssa_def *
clover_lower_nir_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   clover_lower_nir_state *state = reinterpret_cast<clover_lower_nir_state*>(_state);
   nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);

   switch (intrinsic->intrinsic) {
   case nir_intrinsic_load_printf_buffer_address: {
      if (!state->printf_buffer) {
         unsigned location = state->args.size();
         state->args.emplace_back(binary::argument::global, sizeof(size_t),
                                  8, 8, binary::argument::zero_ext,
                                  binary::argument::printf_buffer);

         const glsl_type *type = glsl_uint64_t_type();
         state->printf_buffer = nir_variable_create(b->shader, nir_var_uniform,
                                                    type, "global_printf_buffer");
         state->printf_buffer->data.location = location;
      }
      return nir_load_var(b, state->printf_buffer);
   }
   case nir_intrinsic_load_base_global_invocation_id: {
      nir_ssa_def *loads[3];

      /* create variables if we didn't do so already */
      if (!state->offset_vars[0]) {
         /* TODO: fix for 64 bit */
         /* Even though we only place one scalar argument, clover will bind up to
          * three 32 bit values
          */
         unsigned location = state->args.size();
         state->args.emplace_back(binary::argument::scalar, 4, 4, 4,
                                  binary::argument::zero_ext,
                                  binary::argument::grid_offset);

         const glsl_type *type = glsl_uint_type();
         for (uint32_t i = 0; i < 3; i++) {
            state->offset_vars[i] =
               nir_variable_create(b->shader, nir_var_uniform, type,
                                   "global_invocation_id_offsets");
            state->offset_vars[i]->data.location = location + i;
         }
      }

      for (int i = 0; i < 3; i++) {
         nir_variable *var = state->offset_vars[i];
         loads[i] = var ? nir_load_var(b, var) : nir_imm_int(b, 0);
      }

      return nir_u2u(b, nir_vec(b, loads, state->global_dims),
                     nir_dest_bit_size(intrinsic->dest));
   }
   case nir_intrinsic_load_constant_base_ptr: {
      return nir_load_var(b, state->constant_var);
   }

   default:
      return NULL;
   }
}

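// Runs clover_lower_nir_instr() over the shader and, if the module carries
// constant data, registers a hidden constant-buffer argument whose address
// feeds nir_intrinsic_load_constant_base_ptr.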
static bool
clover_lower_nir(nir_shader *nir, std::vector<binary::argument> &args,
                 uint32_t dims, uint32_t pointer_bit_size)
{
   nir_variable *constant_var = NULL;
   if (nir->constant_data_size) {
      const glsl_type *type = pointer_bit_size == 64 ? glsl_uint64_t_type() : glsl_uint_type();

      constant_var = nir_variable_create(nir, nir_var_uniform, type,
                                         "constant_buffer_addr");
      constant_var->data.location = args.size();

      args.emplace_back(binary::argument::global, sizeof(cl_mem),
                        pointer_bit_size / 8, pointer_bit_size / 8,
                        binary::argument::zero_ext,
                        binary::argument::constant_buffer);
   }

   clover_lower_nir_state state = { args, dims, constant_var };
   return nir_shader_lower_instructions(nir,
      clover_lower_nir_filter, clover_lower_nir_instr, &state);
}

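// Builds the spirv_to_nir() options for a device: OpenCL environment,
// address formats matching the device's pointer width, and the SPIR-V
// capabilities clover exposes for it.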
static spirv_to_nir_options
create_spirv_options(const device &dev, std::string &r_log)
{
   struct spirv_to_nir_options spirv_options = {};
   spirv_options.environment = NIR_SPIRV_OPENCL;
   if (dev.address_bits() == 32u) {
      spirv_options.shared_addr_format = nir_address_format_32bit_offset;
      spirv_options.global_addr_format = nir_address_format_32bit_global;
      spirv_options.temp_addr_format = nir_address_format_32bit_offset;
      spirv_options.constant_addr_format = nir_address_format_32bit_global;
   } else {
      spirv_options.shared_addr_format = nir_address_format_32bit_offset_as_64bit;
      spirv_options.global_addr_format = nir_address_format_64bit_global;
      spirv_options.temp_addr_format = nir_address_format_32bit_offset_as_64bit;
      spirv_options.constant_addr_format = nir_address_format_64bit_global;
   }
   spirv_options.caps.address = true;
   spirv_options.caps.float64 = true;
   spirv_options.caps.int8 = true;
   spirv_options.caps.int16 = true;
   spirv_options.caps.int64 = true;
   spirv_options.caps.kernel = true;
   spirv_options.caps.kernel_image = dev.image_support();
   spirv_options.caps.int64_atomics = dev.has_int64_atomics();
   spirv_options.debug.func = &debug_function;
   spirv_options.debug.private_data = &r_log;
   spirv_options.caps.printf = true;
   return spirv_options;
}

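// Creates the on-disk cache used for the libclc NIR, keyed on the identity
// of this build of the compiler.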
struct disk_cache *clover::nir::create_clc_disk_cache(void)
{
   struct mesa_sha1 ctx;
   unsigned char sha1[20];
   char cache_id[20 * 2 + 1];
   _mesa_sha1_init(&ctx);

   if (!disk_cache_get_function_identifier((void *)clover::nir::create_clc_disk_cache, &ctx))
      return NULL;

   _mesa_sha1_final(&ctx, sha1);

   disk_cache_format_hex_id(cache_id, sha1, 20 * 2);
   return disk_cache_create("clover-clc", cache_id, 0);
}

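// Throws CL_COMPILER_NOT_AVAILABLE if no libclc library can be found for the
// device's pointer size.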
void clover::nir::check_for_libclc(const device &dev)
{
   if (!nir_can_find_libclc(dev.address_bits()))
      throw error(CL_COMPILER_NOT_AVAILABLE);
}

nir_shader *clover::nir::load_libclc_nir(const device &dev, std::string &r_log)
{
   spirv_to_nir_options spirv_options = create_spirv_options(dev, r_log);
   auto *compiler_options = dev_get_nir_compiler_options(dev);

   return nir_load_libclc_shader(dev.address_bits(), dev.clc_cache,
                                 &spirv_options, compiler_options);
}

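// Never treat sampler, texture or image variables as removable; clover still
// binds kernel arguments for them even if the shader no longer references
// them.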
static bool
can_remove_var(nir_variable *var, void *data)
{
   return !(var->type->is_sampler() ||
            var->type->is_texture() ||
            var->type->is_image());
}

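// Translates each kernel symbol of the SPIR-V binary into NIR, runs the
// CL-specific lowering passes, and serializes the result into a new clover
// binary with one text section per kernel plus an optional constant-data
// section.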
binary clover::nir::spirv_to_nir(const binary &mod, const device &dev,
                                 std::string &r_log)
{
   spirv_to_nir_options spirv_options = create_spirv_options(dev, r_log);
   std::shared_ptr<nir_shader> nir = dev.clc_nir;
   spirv_options.clc_shader = nir.get();

   binary b;
   // We only insert one section.
   assert(mod.secs.size() == 1);
   auto &section = mod.secs[0];

   binary::resource_id section_id = 0;
   for (const auto &sym : mod.syms) {
      assert(sym.section == 0);

      const auto *binary =
         reinterpret_cast<const pipe_binary_program_header *>(section.data.data());
      const uint32_t *data = reinterpret_cast<const uint32_t *>(binary->blob);
      const size_t num_words = binary->num_bytes / 4;
      const char *name = sym.name.c_str();
      auto *compiler_options = dev_get_nir_compiler_options(dev);

      nir_shader *nir = spirv_to_nir(data, num_words, nullptr, 0,
                                     MESA_SHADER_KERNEL, name,
                                     &spirv_options, compiler_options);
      if (!nir) {
         r_log += "Translation from SPIR-V to NIR for kernel \"" + sym.name +
                  "\" failed.\n";
         throw build_error();
      }

      nir->info.workgroup_size_variable = sym.reqd_work_group_size[0] == 0;
      nir->info.workgroup_size[0] = sym.reqd_work_group_size[0];
      nir->info.workgroup_size[1] = sym.reqd_work_group_size[1];
      nir->info.workgroup_size[2] = sym.reqd_work_group_size[2];
      nir_validate_shader(nir, "clover");

      // Inline all functions first, according to the comment on
      // nir_inline_functions.
      NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
      NIR_PASS_V(nir, nir_lower_returns);
      NIR_PASS_V(nir, nir_lower_libclc, spirv_options.clc_shader);

      NIR_PASS_V(nir, nir_inline_functions);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_deref);

      // Pick off the single entrypoint that we want.
      nir_remove_non_entrypoints(nir);

      nir_validate_shader(nir, "clover after function inlining");

      NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

      struct nir_lower_printf_options printf_options;
      printf_options.treat_doubles_as_floats = false;
      printf_options.max_buffer_size = dev.max_printf_buffer_size();

      NIR_PASS_V(nir, nir_lower_printf, &printf_options);

      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);

      // copy propagate to prepare for lower_explicit_io
      NIR_PASS_V(nir, nir_split_var_copies);
      NIR_PASS_V(nir, nir_opt_copy_prop_vars);
      NIR_PASS_V(nir, nir_lower_var_copies);
      NIR_PASS_V(nir, nir_lower_vars_to_ssa);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_lower_convert_alu_types, NULL);

      if (compiler_options->lower_to_scalar) {
         NIR_PASS_V(nir, nir_lower_alu_to_scalar,
                    compiler_options->lower_to_scalar_filter, NULL);
      }
      NIR_PASS_V(nir, nir_lower_system_values);
      nir_lower_compute_system_values_options sysval_options = { 0 };
      sysval_options.has_base_global_invocation_id = true;
      NIR_PASS_V(nir, nir_lower_compute_system_values, &sysval_options);

      // constant fold before lowering mem constants
      NIR_PASS_V(nir, nir_opt_constant_folding);

      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_mem_constant, NULL);
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_constant,
                 glsl_get_cl_type_size_align);
      if (nir->constant_data_size > 0) {
         assert(nir->constant_data == NULL);
         nir->constant_data = rzalloc_size(nir, nir->constant_data_size);
         nir_gather_explicit_io_initializers(nir, nir->constant_data,
                                             nir->constant_data_size,
                                             nir_var_mem_constant);
      }
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_constant,
                 spirv_options.constant_addr_format);

      auto args = sym.args;
      NIR_PASS_V(nir, clover_lower_nir, args, dev.max_block_size().size(),
                 dev.address_bits());

      NIR_PASS_V(nir, clover_nir_add_image_uniforms);
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
                 nir_var_uniform, clover_arg_size_align);
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared | nir_var_mem_global |
                 nir_var_function_temp,
                 glsl_get_cl_type_size_align);

      NIR_PASS_V(nir, nir_opt_deref);
      NIR_PASS_V(nir, nir_lower_readonly_images_to_tex, false);
      NIR_PASS_V(nir, clover_nir_lower_images);
      NIR_PASS_V(nir, nir_lower_memcpy);

      /* use offsets for kernel inputs (uniform) */
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_uniform,
                 nir->info.cs.ptr_size == 64 ?
                 nir_address_format_32bit_offset_as_64bit :
                 nir_address_format_32bit_offset);

      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_constant,
                 spirv_options.constant_addr_format);
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared,
                 spirv_options.shared_addr_format);

      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp,
                 spirv_options.temp_addr_format);

      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
                 spirv_options.global_addr_format);

      struct nir_remove_dead_variables_options remove_dead_variables_options = {
         .can_remove_var = can_remove_var,
      };
      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_all, &remove_dead_variables_options);

      if (compiler_options->lower_int64_options)
         NIR_PASS_V(nir, nir_lower_int64);

      NIR_PASS_V(nir, nir_opt_dce);

      if (nir->constant_data_size) {
         const char *ptr = reinterpret_cast<const char *>(nir->constant_data);
         const binary::section constants {
            section_id,
            binary::section::data_constant,
            nir->constant_data_size,
            { ptr, ptr + nir->constant_data_size }
         };
         nir->constant_data = NULL;
         nir->constant_data_size = 0;
         b.secs.push_back(constants);
      }

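      // Serialize the NIR into the binary's text section. The printf
      // metadata is stolen into a temporary ralloc context so it survives
      // the freeing of the shader until it has been copied into the clover
      // binary below.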
      void *mem_ctx = ralloc_context(NULL);
      unsigned printf_info_count = nir->printf_info_count;
      nir_printf_info *printf_infos = nir->printf_info;

      ralloc_steal(mem_ctx, printf_infos);

      struct blob blob;
      blob_init(&blob);
      nir_serialize(&blob, nir, false);

      ralloc_free(nir);

      const pipe_binary_program_header header { uint32_t(blob.size) };
      binary::section text { section_id, binary::section::text_executable, header.num_bytes, {} };
      text.data.insert(text.data.end(), reinterpret_cast<const char *>(&header),
                       reinterpret_cast<const char *>(&header) + sizeof(header));
      text.data.insert(text.data.end(), blob.data, blob.data + blob.size);

      free(blob.data);

      b.printf_strings_in_buffer = false;
      b.printf_infos.reserve(printf_info_count);
      for (unsigned i = 0; i < printf_info_count; i++) {
         binary::printf_info info;

         info.arg_sizes.reserve(printf_infos[i].num_args);
         for (unsigned j = 0; j < printf_infos[i].num_args; j++)
            info.arg_sizes.push_back(printf_infos[i].arg_sizes[j]);

         info.strings.resize(printf_infos[i].string_size);
         memcpy(info.strings.data(), printf_infos[i].strings, printf_infos[i].string_size);
         b.printf_infos.push_back(info);
      }

      ralloc_free(mem_ctx);

      b.syms.emplace_back(sym.name, sym.attributes,
                          sym.reqd_work_group_size, section_id, 0, args);
      b.secs.push_back(text);
      section_id++;
   }
   return b;
}
#else
binary clover::nir::spirv_to_nir(const binary &mod, const device &dev, std::string &r_log)
{
   r_log += "SPIR-V support in clover is not enabled.\n";
   throw error(CL_LINKER_NOT_AVAILABLE);
}
#endif