//
// Copyright 2012 Francisco Jerez
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//

#include <algorithm>
#include "core/device.hpp"
#include "core/platform.hpp"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "spirv/invocation.hpp"
#include "util/bitscan.h"
#include "util/u_debug.h"
#include "nir/invocation.hpp"
#include <fstream>

using namespace clover;

namespace {
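   // Query a compute capability from the gallium driver.  The first
   // get_compute_param() call with a NULL buffer returns the size of the
   // result in bytes; the second call fills in the returned vector.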
   template<typename T>
   std::vector<T>
   get_compute_param(pipe_screen *pipe, pipe_shader_ir ir_format,
                     pipe_compute_cap cap) {
      int sz = pipe->get_compute_param(pipe, ir_format, cap, NULL);
      std::vector<T> v(sz / sizeof(T));

      pipe->get_compute_param(pipe, ir_format, cap, &v.front());
      return v;
   }

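   // Walk through the minimum device capabilities required by each OpenCL
   // version and return the highest version this device satisfies.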
   cl_version
   get_highest_supported_version(const device &dev) {
      // All the checks below assume that the device supports FULL_PROFILE
      // (which is the only profile supported by clover) and that the device
      // is not of type CL_DEVICE_TYPE_CUSTOM.
      assert(dev.type() != CL_DEVICE_TYPE_CUSTOM);

      cl_version version = CL_MAKE_VERSION(0, 0, 0);

      const auto has_extension =
         [extensions = dev.supported_extensions()](const char *extension_name){
            return std::find_if(extensions.begin(), extensions.end(),
                  [extension_name](const cl_name_version &extension){
                     return strcmp(extension.name, extension_name) == 0;
                  }) != extensions.end();
      };
      const bool supports_images = dev.image_support();

      // Check requirements for OpenCL 1.0
      if (dev.max_compute_units() < 1 ||
          dev.max_block_size().size() < 3 ||
          // TODO: Check CL_DEVICE_MAX_WORK_ITEM_SIZES
          dev.max_threads_per_block() < 1 ||
          (dev.address_bits() != 32 && dev.address_bits() != 64) ||
          dev.max_mem_alloc_size() < std::max(dev.max_mem_global() / 4,
                                              (cl_ulong)128 * 1024 * 1024) ||
          dev.max_mem_input() < 256 ||
          dev.max_const_buffer_size() < 64 * 1024 ||
          dev.max_const_buffers() < 8 ||
          dev.max_mem_local() < 16 * 1024 ||
          dev.clc_version < CL_MAKE_VERSION(1, 0, 0) ||
          (supports_images &&
           (dev.max_images_read() < 128 ||
            dev.max_images_write() < 8 ||
            dev.max_image_size() < 8192 ||
            dev.max_image_size_3d() < 2048 ||
            dev.max_samplers() < 16))) {
         return version;
      }
      version = CL_MAKE_VERSION(1, 0, 0);

      // Check requirements for OpenCL 1.1
      if (!has_extension("cl_khr_byte_addressable_store") ||
          !has_extension("cl_khr_global_int32_base_atomics") ||
          !has_extension("cl_khr_global_int32_extended_atomics") ||
          !has_extension("cl_khr_local_int32_base_atomics") ||
          !has_extension("cl_khr_local_int32_extended_atomics") ||
          // OpenCL 1.1 increased the minimum value for
          // CL_DEVICE_MAX_PARAMETER_SIZE to 1024 bytes.
          dev.max_mem_input() < 1024 ||
          dev.mem_base_addr_align() < sizeof(cl_long16) ||
          // OpenCL 1.1 increased the minimum value for
          // CL_DEVICE_LOCAL_MEM_SIZE to 32 KB.
          dev.max_mem_local() < 32 * 1024 ||
          dev.clc_version < CL_MAKE_VERSION(1, 1, 0)) {
         return version;
      }
      version = CL_MAKE_VERSION(1, 1, 0);

      // Check requirements for OpenCL 1.2
      if ((dev.has_doubles() && !has_extension("cl_khr_fp64")) ||
          dev.clc_version < CL_MAKE_VERSION(1, 2, 0) ||
          dev.max_printf_buffer_size() < 1 * 1024 * 1024 ||
          (supports_images &&
           (dev.max_image_buffer_size() < 65536 ||
            dev.max_image_array_number() < 2048))) {
         return version;
      }
      version = CL_MAKE_VERSION(1, 2, 0);

      // Check requirements for OpenCL 3.0
      if (dev.max_mem_alloc_size() < std::max(std::min((cl_ulong)1024 * 1024 * 1024,
                                                       dev.max_mem_global() / 4),
                                              (cl_ulong)128 * 1024 * 1024) ||
          // TODO: If pipes are supported, check:
          // * CL_DEVICE_MAX_PIPE_ARGS
          // * CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS
          // * CL_DEVICE_PIPE_MAX_PACKET_SIZE
          // TODO: If on-device queues are supported, check:
          // * CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES
          // * CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE
          // * CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE
          // * CL_DEVICE_MAX_ON_DEVICE_QUEUES
          // * CL_DEVICE_MAX_ON_DEVICE_EVENTS
          dev.clc_version < CL_MAKE_VERSION(3, 0, 0) ||
          (supports_images &&
           (dev.max_images_write() < 64 ||
            dev.max_image_size() < 16384))) {
         return version;
      }
      version = CL_MAKE_VERSION(3, 0, 0);

      return version;
   }
}

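// Create a clover device on top of the given pipe_loader device.  Throws
// CL_INVALID_DEVICE if the screen cannot be created or the driver exposes
// no IR that clover can consume.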
device::device(clover::platform &platform, pipe_loader_device *ldev) :
   platform(platform), clc_cache(NULL), ldev(ldev) {
   pipe = pipe_loader_create_screen(ldev);
   if (pipe && pipe->get_param(pipe, PIPE_CAP_COMPUTE)) {
      const bool has_supported_ir = supports_ir(PIPE_SHADER_IR_NATIVE) ||
         supports_ir(PIPE_SHADER_IR_NIR_SERIALIZED);
      if (has_supported_ir) {
         unsigned major = 1, minor = 1;
         debug_get_version_option("CLOVER_DEVICE_CLC_VERSION_OVERRIDE",
                                  &major, &minor);
         clc_version = CL_MAKE_VERSION(major, minor, 0);

         version = get_highest_supported_version(*this);
         major = CL_VERSION_MAJOR(version);
         minor = CL_VERSION_MINOR(version);
         debug_get_version_option("CLOVER_DEVICE_VERSION_OVERRIDE", &major,
                                  &minor);
         version = CL_MAKE_VERSION(major, minor, 0);
      }

      if (supports_ir(PIPE_SHADER_IR_NATIVE))
         return;
#ifdef HAVE_CLOVER_SPIRV
      if (supports_ir(PIPE_SHADER_IR_NIR_SERIALIZED)) {
         nir::check_for_libclc(*this);
         clc_cache = nir::create_clc_disk_cache();
         clc_nir = lazy<std::shared_ptr<nir_shader>>([&] () {
            std::string log;
            return std::shared_ptr<nir_shader>(nir::load_libclc_nir(*this, log),
                                               ralloc_free);
         });
         return;
      }
#endif
   }
   if (pipe)
      pipe->destroy(pipe);
   throw error(CL_INVALID_DEVICE);
}

device::~device() {
   if (clc_cache)
      disk_cache_destroy(clc_cache);
   if (pipe)
      pipe->destroy(pipe);
   if (ldev)
      pipe_loader_release(&ldev, 1);
}

bool
device::operator==(const device &dev) const {
   return this == &dev;
}

cl_device_type
device::type() const {
   switch (ldev->type) {
   case PIPE_LOADER_DEVICE_SOFTWARE:
      return CL_DEVICE_TYPE_CPU;
   case PIPE_LOADER_DEVICE_PCI:
   case PIPE_LOADER_DEVICE_PLATFORM:
      return CL_DEVICE_TYPE_GPU;
   default:
      unreachable("Unknown device type.");
   }
}

cl_uint
device::vendor_id() const {
   switch (ldev->type) {
   case PIPE_LOADER_DEVICE_SOFTWARE:
   case PIPE_LOADER_DEVICE_PLATFORM:
      return 0;
   case PIPE_LOADER_DEVICE_PCI:
      return ldev->u.pci.vendor_id;
   default:
      unreachable("Unknown device type.");
   }
}

size_t
device::max_images_read() const {
   return PIPE_MAX_SHADER_SAMPLER_VIEWS;
}

size_t
device::max_images_write() const {
   return PIPE_MAX_SHADER_IMAGES;
}

size_t
device::max_image_buffer_size() const {
   return pipe->get_param(pipe, PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE);
}

cl_uint
device::max_image_size() const {
   return pipe->get_param(pipe, PIPE_CAP_MAX_TEXTURE_2D_SIZE);
}

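// PIPE_CAP_MAX_TEXTURE_3D_LEVELS reports a number of mipmap levels, so the
// maximum 3D image dimension is 2^(levels - 1).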
cl_uint
device::max_image_size_3d() const {
   return 1 << (pipe->get_param(pipe, PIPE_CAP_MAX_TEXTURE_3D_LEVELS) - 1);
}

size_t
device::max_image_array_number() const {
   return pipe->get_param(pipe, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS);
}

cl_uint
device::max_samplers() const {
   return pipe->get_shader_param(pipe, PIPE_SHADER_COMPUTE,
                                 PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
}

cl_ulong
device::max_mem_global() const {
   return get_compute_param<uint64_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE)[0];
}

cl_ulong
device::max_mem_local() const {
   return get_compute_param<uint64_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE)[0];
}

cl_ulong
device::max_mem_input() const {
   return get_compute_param<uint64_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_MAX_INPUT_SIZE)[0];
}

cl_ulong
device::max_const_buffer_size() const {
   return pipe->get_shader_param(pipe, PIPE_SHADER_COMPUTE,
                                 PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE);
}

cl_uint
device::max_const_buffers() const {
   return pipe->get_shader_param(pipe, PIPE_SHADER_COMPUTE,
                                 PIPE_SHADER_CAP_MAX_CONST_BUFFERS);
}

size_t
device::max_threads_per_block() const {
   return get_compute_param<uint64_t>(
      pipe, ir_format(), PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK)[0];
}

cl_ulong
device::max_mem_alloc_size() const {
   return get_compute_param<uint64_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE)[0];
}

cl_uint
device::max_clock_frequency() const {
   return get_compute_param<uint32_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY)[0];
}

cl_uint
device::max_compute_units() const {
   return get_compute_param<uint32_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS)[0];
}

cl_uint
device::max_printf_buffer_size() const {
   return 1024 * 1024;
}

bool
device::image_support() const {
   return get_compute_param<uint32_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_IMAGES_SUPPORTED)[0];
}

bool
device::has_doubles() const {
   return pipe->get_param(pipe, PIPE_CAP_DOUBLES);
}

bool
device::has_halves() const {
   return pipe->get_shader_param(pipe, PIPE_SHADER_COMPUTE,
                                 PIPE_SHADER_CAP_FP16);
}

bool
device::has_int64_atomics() const {
   return pipe->get_shader_param(pipe, PIPE_SHADER_COMPUTE,
                                 PIPE_SHADER_CAP_INT64_ATOMICS);
}

bool
device::has_unified_memory() const {
   return pipe->get_param(pipe, PIPE_CAP_UMA);
}

size_t
device::mem_base_addr_align() const {
   uint64_t page_size = 0;
   os_get_page_size(&page_size);
   return std::max((size_t)page_size, sizeof(cl_long) * 16);
}

cl_device_svm_capabilities
device::svm_support() const {
   // Without PIPE_CAP_RESOURCE_FROM_USER_MEMORY, SVM and CL_MEM_USE_HOST_PTR
   // interactions won't work according to the spec, as clover manages a GPU
   // side copy of the host data.
   //
   // The biggest problem is memory buffers created with CL_MEM_USE_HOST_PTR
   // where the application and/or the kernel updates the memory via SVM
   // rather than through the cl_mem buffer.  We can't even do proper tracking
   // of what memory might have been accessed, because the host pointer of the
   // buffer could lie within an SVM region, and through the CL API there is
   // no reliable way of knowing whether a given cl_mem buffer was accessed by
   // a kernel, so the runtime can't reliably tell from which side the GPU
   // buffer content needs to be updated.
   //
   // Another unsolvable scenario is a cl_mem object passed both by cl_mem
   // reference and by SVM pointer into the same kernel at the same time.
   if (allows_user_pointers() && pipe->get_param(pipe, PIPE_CAP_SYSTEM_SVM))
      // We can emulate all the lower SVM levels if we support fine-grained
      // system allocations.
      return CL_DEVICE_SVM_FINE_GRAIN_SYSTEM |
             CL_DEVICE_SVM_COARSE_GRAIN_BUFFER |
             CL_DEVICE_SVM_FINE_GRAIN_BUFFER;
   return 0;
}

bool
device::allows_user_pointers() const {
   return pipe->get_param(pipe, PIPE_CAP_RESOURCE_FROM_USER_MEMORY) ||
          pipe->get_param(pipe, PIPE_CAP_RESOURCE_FROM_USER_MEMORY_COMPUTE_ONLY);
}

std::vector<size_t>
device::max_block_size() const {
   auto v = get_compute_param<uint64_t>(pipe, ir_format(),
                                        PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE);
   return { v.begin(), v.end() };
}

cl_uint
device::subgroup_size() const {
   return get_compute_param<uint32_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_SUBGROUP_SIZE)[0];
}

cl_uint
device::address_bits() const {
   return get_compute_param<uint32_t>(pipe, ir_format(),
                                      PIPE_COMPUTE_CAP_ADDRESS_BITS)[0];
}

std::string
device::device_name() const {
   return pipe->get_name(pipe);
}

std::string
device::vendor_name() const {
   return pipe->get_device_vendor(pipe);
}

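// Pick the IR to compile to.  Prefer the driver's native IR when available;
// otherwise fall back to serialized NIR, which is the path used by the
// SPIR-V/libclc pipeline set up in the constructor.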
enum pipe_shader_ir
device::ir_format() const {
   if (supports_ir(PIPE_SHADER_IR_NATIVE))
      return PIPE_SHADER_IR_NATIVE;

   assert(supports_ir(PIPE_SHADER_IR_NIR_SERIALIZED));
   return PIPE_SHADER_IR_NIR_SERIALIZED;
}

std::string
device::ir_target() const {
   std::vector<char> target = get_compute_param<char>(
      pipe, ir_format(), PIPE_COMPUTE_CAP_IR_TARGET);
   return { target.data() };
}

enum pipe_endian
device::endianness() const {
   return (enum pipe_endian)pipe->get_param(pipe, PIPE_CAP_ENDIANNESS);
}

std::string
device::device_version_as_string() const {
   static const std::string version_string =
      std::to_string(CL_VERSION_MAJOR(version)) + "." +
      std::to_string(CL_VERSION_MINOR(version));
   return version_string;
}

std::string
device::device_clc_version_as_string() const {
   int major = CL_VERSION_MAJOR(clc_version);
   int minor = CL_VERSION_MINOR(clc_version);

   /* for CL 3.0 we need this to be 1.2 until we support 2.0. */
   if (major == 3) {
      major = 1;
      minor = 2;
   }
   static const std::string version_string =
      std::to_string(major) + "." +
      std::to_string(minor);
   return version_string;
}

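// Check whether the driver advertises support for the given IR in the
// PIPE_SHADER_CAP_SUPPORTED_IRS bitmask.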
bool
device::supports_ir(enum pipe_shader_ir ir) const {
   return pipe->get_shader_param(pipe, PIPE_SHADER_COMPUTE,
                                 PIPE_SHADER_CAP_SUPPORTED_IRS) & (1 << ir);
}

std::vector<cl_name_version>
device::supported_extensions() const {
   std::vector<cl_name_version> vec;

   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_byte_addressable_store" } );
   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_global_int32_base_atomics" } );
   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_global_int32_extended_atomics" } );
   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_local_int32_base_atomics" } );
   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_local_int32_extended_atomics" } );
   if (has_int64_atomics()) {
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_int64_base_atomics" } );
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_int64_extended_atomics" } );
   }
   if (has_doubles())
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_fp64" } );
   if (has_halves())
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_fp16" } );
   if (svm_support())
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_arm_shared_virtual_memory" } );
   if (!clover::spirv::supported_versions().empty() &&
       supports_ir(PIPE_SHADER_IR_NIR_SERIALIZED))
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_il_program" } );
   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "cl_khr_extended_versioning" } );
   return vec;
}

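// Join the extension names into the single space-separated string used for
// CL_DEVICE_EXTENSIONS; the result is cached in a function-local static.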
std::string
device::supported_extensions_as_string() const {
   static std::string extensions_string;

   if (!extensions_string.empty())
      return extensions_string;

   const auto extension_list = supported_extensions();
   for (const auto &extension : extension_list) {
      if (!extensions_string.empty())
         extensions_string += " ";
      extensions_string += extension.name;
   }
   return extensions_string;
}

std::vector<cl_name_version>
device::supported_il_versions() const {
   return clover::spirv::supported_versions();
}

const void *
device::get_compiler_options(enum pipe_shader_ir ir) const {
   return pipe->get_compiler_options(pipe, ir, PIPE_SHADER_COMPUTE);
}

cl_version
device::device_version() const {
   return version;
}

cl_version
device::device_clc_version(bool api) const {
   /*
    * For the API we have to limit this to 1.2,
    * but internally we want 3.0 if it works.
    */
   if (!api)
      return clc_version;

   int major = CL_VERSION_MAJOR(clc_version);
   /* for CL 3.0 we need this to be 1.2 until we support 2.0. */
   if (major == 3) {
      return CL_MAKE_VERSION(1, 2, 0);
   }
   return clc_version;
}

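// List every OpenCL C version the compiler accepts, as reported for
// CL_DEVICE_OPENCL_C_ALL_VERSIONS.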
std::vector<cl_name_version>
device::opencl_c_all_versions() const {
   std::vector<cl_name_version> vec;
   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 0, 0), "OpenCL C" } );
   vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 1, 0), "OpenCL C" } );

   if (CL_VERSION_MAJOR(clc_version) == 1 &&
       CL_VERSION_MINOR(clc_version) == 2)
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 2, 0), "OpenCL C" } );
   if (CL_VERSION_MAJOR(clc_version) == 3) {
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(1, 2, 0), "OpenCL C" } );
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(3, 0, 0), "OpenCL C" } );
   }
   return vec;
}

std::vector<cl_name_version>
device::opencl_c_features() const {
   std::vector<cl_name_version> vec;

   vec.push_back( cl_name_version{ CL_MAKE_VERSION(3, 0, 0), "__opencl_c_int64" } );
   if (has_doubles())
      vec.push_back( cl_name_version{ CL_MAKE_VERSION(3, 0, 0), "__opencl_c_fp64" } );

   return vec;
}