/*===--------------------------------------------------------------------------
 * ATMI (Asynchronous Task and Memory Interface)
 *
 * This file is distributed under the MIT License. See LICENSE.txt for details.
 *===------------------------------------------------------------------------*/
#include <gelf.h>
#include <libelf.h>

#include <cassert>
#include <cstdarg>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "internal.h"
#include "machine.h"
#include "rt.h"

#include "msgpack.h"

#define msgpackErrorCheck(msg, status)                                         \
  if (status != 0) {                                                           \
    printf("[%s:%d] %s failed\n", __FILE__, __LINE__, #msg);                   \
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;                               \
  } else {                                                                     \
  }
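// Usage: msgpackErrorCheck(<description>, status) expands to an early return
// of HSA_STATUS_ERROR_INVALID_CODE_OBJECT (plus a file/line diagnostic) when
// the preceding msgpack lookup reported a non-zero status.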

typedef unsigned char *address;
/*
 * Note descriptors.
 */
typedef struct {
  uint32_t n_namesz; /* Length of note's name. */
  uint32_t n_descsz; /* Length of note's value. */
  uint32_t n_type;   /* Type of note. */
  // then name
  // then padding, optional
  // then desc, at 4 byte alignment (not 8, despite being elf64)
} Elf_Note;
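// For example, for the AMDGPU metadata note handled below (n_namesz ==
// sizeof("AMDGPU") == 7), the name begins right after this 12-byte header and
// the descriptor begins at offset 12 + alignUp(7, 4) == 20 bytes from the
// start of the note.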

// The LLVM include file and the structs/enums it provides have been
// replicated below on a per-use basis. For example,
// llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields, but we currently
// care only about kernargSegmentSize_, so our KernelMD replica carries just
// that field. Replicating only the pieces we need avoids forcing a dependency
// on LLVM_INCLUDE_DIR just to compile the runtime.
// #include "llvm/Support/AMDGPUMetadata.h"
// typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
// using llvm::AMDGPU::HSAMD::AccessQualifier;
// using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
// using llvm::AMDGPU::HSAMD::ValueKind;
// using llvm::AMDGPU::HSAMD::ValueType;

class KernelArgMD {
public:
  enum class ValueKind {
    HiddenGlobalOffsetX,
    HiddenGlobalOffsetY,
    HiddenGlobalOffsetZ,
    HiddenNone,
    HiddenPrintfBuffer,
    HiddenDefaultQueue,
    HiddenCompletionAction,
    HiddenMultiGridSyncArg,
    HiddenHostcallBuffer,
    Unknown
  };

  KernelArgMD()
      : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
        align_(0), valueKind_(ValueKind::Unknown) {}

  // fields
  std::string name_;
  std::string typeName_;
  uint32_t size_;
  uint32_t offset_;
  uint32_t align_;
  ValueKind valueKind_;
};

class KernelMD {
public:
  KernelMD() : kernargSegmentSize_(0ull) {}

  // fields
  uint64_t kernargSegmentSize_;
};

static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
    // Including only those fields that are relevant to the runtime.
    // {"ByValue", KernelArgMD::ValueKind::ByValue},
    // {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
    // {"DynamicSharedPointer",
    //  KernelArgMD::ValueKind::DynamicSharedPointer},
    // {"Sampler", KernelArgMD::ValueKind::Sampler},
    // {"Image", KernelArgMD::ValueKind::Image},
    // {"Pipe", KernelArgMD::ValueKind::Pipe},
    // {"Queue", KernelArgMD::ValueKind::Queue},
    {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
    {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
    {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
    // v3
    // {"by_value", KernelArgMD::ValueKind::ByValue},
    // {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
    // {"dynamic_shared_pointer",
    //  KernelArgMD::ValueKind::DynamicSharedPointer},
    // {"sampler", KernelArgMD::ValueKind::Sampler},
    // {"image", KernelArgMD::ValueKind::Image},
    // {"pipe", KernelArgMD::ValueKind::Pipe},
    // {"queue", KernelArgMD::ValueKind::Queue},
    {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
    {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"hidden_completion_action",
     KernelArgMD::ValueKind::HiddenCompletionAction},
    {"hidden_multigrid_sync_arg",
     KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
};
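// Both the legacy camel-case spellings and the code object v3 lowercase
// spellings map to the same ValueKind, so the lookup in populate_kernelArgMD
// below succeeds regardless of which form appears in the metadata.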

// public variables -- TODO(ashwinma) move these to a runtime object?
atmi_machine_t g_atmi_machine;
ATLMachine g_atl_machine;

hsa_region_t atl_gpu_kernarg_region;
std::vector<hsa_amd_memory_pool_t> atl_gpu_kernarg_pools;
hsa_region_t atl_cpu_kernarg_region;

static std::vector<hsa_executable_t> g_executables;

std::map<std::string, std::string> KernelNameMap;
std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable;
std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable;

bool g_atmi_initialized = false;
bool g_atmi_hostcall_required = false;

struct timespec context_init_time;
int context_init_time_init = 0;

/*
   atlc holds all of the internal global state.
   The structure atl_context_t is defined in atl_internal.h.
   Most references use the global structure atlc directly;
   atlc_p points at atlc, so atlc_p-> accesses are equivalent.
*/

atl_context_t atlc = {.struct_initialized = false};
atl_context_t *atlc_p = NULL;

namespace core {
/* Machine Info */
atmi_machine_t *Runtime::GetMachineInfo() {
  if (!atlc.g_hsa_initialized)
    return NULL;
  return &g_atmi_machine;
}

void atl_set_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = true;
}

void atl_reset_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = false;
}

bool atl_is_atmi_initialized() { return g_atmi_initialized; }

void allow_access_to_all_gpu_agents(void *ptr) {
  hsa_status_t err;
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  std::vector<hsa_agent_t> agents;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    agents.push_back(gpu_procs[i].agent());
  }
  err = hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr);
  ErrorCheck(Allow agents ptr access, err);
}

atmi_status_t Runtime::Initialize() {
  atmi_devtype_t devtype = ATMI_DEVTYPE_GPU;
  if (atl_is_atmi_initialized())
    return ATMI_STATUS_SUCCESS;

  if (devtype == ATMI_DEVTYPE_ALL || devtype & ATMI_DEVTYPE_GPU) {
    ATMIErrorCheck(GPU context init, atl_init_gpu_context());
  }

  atl_set_atmi_initialized();
  return ATMI_STATUS_SUCCESS;
}

atmi_status_t Runtime::Finalize() {
  // TODO(ashwinma): Finalize all processors, queues, signals, kernarg memory
  // regions
  hsa_status_t err;

  for (uint32_t i = 0; i < g_executables.size(); i++) {
    err = hsa_executable_destroy(g_executables[i]);
    ErrorCheck(Destroying executable, err);
  }

  for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) {
    SymbolInfoTable[i].clear();
  }
  SymbolInfoTable.clear();
  for (uint32_t i = 0; i < KernelInfoTable.size(); i++) {
    KernelInfoTable[i].clear();
  }
  KernelInfoTable.clear();

  atl_reset_atmi_initialized();
  err = hsa_shut_down();
  ErrorCheck(Shutting down HSA, err);

  return ATMI_STATUS_SUCCESS;
}

void atmi_init_context_structs() {
  atlc_p = &atlc;
  atlc.struct_initialized = true; /* This only gets called one time */
  atlc.g_hsa_initialized = false;
  atlc.g_gpu_initialized = false;
  atlc.g_tasks_initialized = false;
}

// Implement memory_pool iteration function
static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
                                         void *data) {
  ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
  hsa_status_t err = HSA_STATUS_SUCCESS;
  // Check if the memory_pool is allowed to allocate, i.e. do not return group
  // memory
  bool alloc_allowed = false;
  err = hsa_amd_memory_pool_get_info(
      memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
      &alloc_allowed);
  ErrorCheck(Alloc allowed in memory pool check, err);
  if (alloc_allowed) {
    uint32_t global_flag = 0;
    err = hsa_amd_memory_pool_get_info(
        memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
    ErrorCheck(Get memory pool info, err);
    if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
      proc->addMemory(new_mem);
      if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT & global_flag) {
        DEBUG_PRINT("GPU kernel args pool handle: %lu\n", memory_pool.handle);
        atl_gpu_kernarg_pools.push_back(memory_pool);
      }
    } else {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
      proc->addMemory(new_mem);
    }
  }

  return err;
}

static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
  hsa_status_t err = HSA_STATUS_SUCCESS;
  hsa_device_type_t device_type;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
  ErrorCheck(Get device type info, err);
  switch (device_type) {
  case HSA_DEVICE_TYPE_CPU: {
    ATLCPUProcessor new_proc(agent);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    ErrorCheck(Iterate all memory pools, err);
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_GPU: {
    hsa_profile_t profile;
    err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
    ErrorCheck(Query the agent profile, err);
    atmi_devtype_t gpu_type;
    gpu_type =
        (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
    ATLGPUProcessor new_proc(agent, gpu_type);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    ErrorCheck(Iterate all memory pools, err);
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_DSP: {
    err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  } break;
  }

  return err;
}

hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (segment != HSA_REGION_SEGMENT_GLOBAL) {
    return HSA_STATUS_SUCCESS;
  }
  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }
  return HSA_STATUS_SUCCESS;
}

/* Determines if a memory region can be used for kernarg allocations. */
static hsa_status_t get_kernarg_memory_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (HSA_REGION_SEGMENT_GLOBAL != segment) {
    return HSA_STATUS_SUCCESS;
  }

  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }

  return HSA_STATUS_SUCCESS;
}

static hsa_status_t init_compute_and_memory() {
  hsa_status_t err;

  /* Iterate over the agents and pick the gpu agent */
  err = hsa_iterate_agents(get_agent_info, NULL);
  if (err == HSA_STATUS_INFO_BREAK) {
    err = HSA_STATUS_SUCCESS;
  }
  ErrorCheck(Getting a gpu agent, err);
  if (err != HSA_STATUS_SUCCESS)
    return err;

  /* Init all devices or individual device types? */
  std::vector<ATLCPUProcessor> &cpu_procs =
      g_atl_machine.processors<ATLCPUProcessor>();
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  /* For CPU memory pools, add other devices that can access them directly
   * or indirectly */
  for (auto &cpu_proc : cpu_procs) {
    for (auto &cpu_mem : cpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = cpu_mem.memory();
      for (auto &gpu_proc : gpu_procs) {
        hsa_agent_t agent = gpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          gpu_proc.addMemory(cpu_mem);
        }
      }
    }
  }

  /* FIXME: are the below combinations of procs and memory pools needed?
   * all to all compare procs with their memory pools and add those memory
   * pools that are accessible by the target procs */
  for (auto &gpu_proc : gpu_procs) {
    for (auto &gpu_mem : gpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = gpu_mem.memory();
      for (auto &cpu_proc : cpu_procs) {
        hsa_agent_t agent = cpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          cpu_proc.addMemory(gpu_mem);
        }
      }
    }
  }

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_CPU] = cpu_procs.size();
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_GPU] = gpu_procs.size();

  size_t num_procs = cpu_procs.size() + gpu_procs.size();
  // g_atmi_machine.devices = (atmi_device_t *)malloc(num_procs *
  // sizeof(atmi_device_t));
  atmi_device_t *all_devices = reinterpret_cast<atmi_device_t *>(
      malloc(num_procs * sizeof(atmi_device_t)));
  int num_iGPUs = 0;
  int num_dGPUs = 0;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
      num_iGPUs++;
    else
      num_dGPUs++;
  }
  assert(num_iGPUs + num_dGPUs == gpu_procs.size() &&
         "Number of dGPUs and iGPUs do not add up");
  DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
  DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
  DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
  DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_iGPU] = num_iGPUs;
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_dGPU] = num_dGPUs;

  int cpus_begin = 0;
  int cpus_end = cpu_procs.size();
  int gpus_begin = cpu_procs.size();
  int gpus_end = cpu_procs.size() + gpu_procs.size();
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_CPU] = &all_devices[cpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_GPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_iGPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_dGPU] = &all_devices[gpus_begin];
  int proc_index = 0;
  for (int i = cpus_begin; i < cpus_end; i++) {
    all_devices[i].type = cpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("CPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  for (int i = gpus_begin; i < gpus_end; i++) {
    all_devices[i].type = gpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("GPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  atl_cpu_kernarg_region.handle = (uint64_t)-1;
  if (cpu_procs.size() > 0) {
    err = hsa_agent_iterate_regions(
        cpu_procs[0].agent(), get_fine_grained_region, &atl_cpu_kernarg_region);
    if (err == HSA_STATUS_INFO_BREAK) {
      err = HSA_STATUS_SUCCESS;
    }
    err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    ErrorCheck(Finding a CPU kernarg memory region handle, err);
  }
  /* Find a memory region that supports kernel arguments. */
  atl_gpu_kernarg_region.handle = (uint64_t)-1;
  if (gpu_procs.size() > 0) {
    hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region,
                              &atl_gpu_kernarg_region);
    err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    ErrorCheck(Finding a kernarg memory region, err);
  }
  if (num_procs > 0)
    return HSA_STATUS_SUCCESS;
  else
    return HSA_STATUS_ERROR_NOT_INITIALIZED;
}

hsa_status_t init_hsa() {
  if (atlc.g_hsa_initialized == false) {
    DEBUG_PRINT("Initializing HSA...");
    hsa_status_t err = hsa_init();
    ErrorCheck(Initializing the hsa runtime, err);
    if (err != HSA_STATUS_SUCCESS)
      return err;

    err = init_compute_and_memory();
    if (err != HSA_STATUS_SUCCESS)
      return err;
    ErrorCheck(After initializing compute and memory, err);

    int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
    KernelInfoTable.resize(gpu_count);
    SymbolInfoTable.resize(gpu_count);
    for (uint32_t i = 0; i < SymbolInfoTable.size(); i++)
      SymbolInfoTable[i].clear();
    for (uint32_t i = 0; i < KernelInfoTable.size(); i++)
      KernelInfoTable[i].clear();
    atlc.g_hsa_initialized = true;
    DEBUG_PRINT("done\n");
  }
  return HSA_STATUS_SUCCESS;
}

void init_tasks() {
  if (atlc.g_tasks_initialized != false)
    return;
  std::vector<hsa_agent_t> gpu_agents;
  int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
  for (int gpu = 0; gpu < gpu_count; gpu++) {
    atmi_place_t place = ATMI_PLACE_GPU(0, gpu);
    ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
    gpu_agents.push_back(proc.agent());
  }
  atlc.g_tasks_initialized = true;
}

hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
#if (ROCM_VERSION_MAJOR >= 3) ||                                               \
    (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
  if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
#else
  if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
#endif
    hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
    // memory_fault.agent
    // memory_fault.virtual_address
    // memory_fault.fault_reason_mask
    // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address);
    std::stringstream stream;
    stream << std::hex << (uintptr_t)memory_fault.virtual_address;
    std::string addr("0x" + stream.str());

    std::string err_string = "[GPU Memory Error] Addr: " + addr;
    err_string += " Reason: ";
    // fault_reason_mask is a bitmask; the constants below are the individual
    // hsa_amd_memory_fault_reason_t flags (bits 0 through 5).
    if (!(memory_fault.fault_reason_mask & 0x0000003F)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask & 0x00000001)
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & 0x00000002)
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & 0x00000004)
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & 0x00000008)
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & 0x00000010)
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & 0x00000020)
        err_string += "Can't determine the exact fault address. ";
    }
    fprintf(stderr, "%s\n", err_string.c_str());
    return HSA_STATUS_ERROR;
  }
  return HSA_STATUS_SUCCESS;
}

atmi_status_t atl_init_gpu_context() {
  if (atlc.struct_initialized == false)
    atmi_init_context_structs();
  if (atlc.g_gpu_initialized != false)
    return ATMI_STATUS_SUCCESS;

  hsa_status_t err;
  err = init_hsa();
  if (err != HSA_STATUS_SUCCESS)
    return ATMI_STATUS_ERROR;

  if (context_init_time_init == 0) {
    clock_gettime(CLOCK_MONOTONIC_RAW, &context_init_time);
    context_init_time_init = 1;
  }

  err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
  ErrorCheck(Registering the system for memory faults, err);

  init_tasks();
  atlc.g_gpu_initialized = true;
  return ATMI_STATUS_SUCCESS;
}

bool isImplicit(KernelArgMD::ValueKind value_kind) {
  switch (value_kind) {
  case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
  case KernelArgMD::ValueKind::HiddenNone:
  case KernelArgMD::ValueKind::HiddenPrintfBuffer:
  case KernelArgMD::ValueKind::HiddenDefaultQueue:
  case KernelArgMD::ValueKind::HiddenCompletionAction:
  case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
  case KernelArgMD::ValueKind::HiddenHostcallBuffer:
    return true;
  default:
    return false;
  }
}

static std::pair<unsigned char *, unsigned char *>
find_metadata(void *binary, size_t binSize) {
  std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};

  Elf *e = elf_memory(static_cast<char *>(binary), binSize);
  if (elf_kind(e) != ELF_K_ELF) {
    return failure;
  }

  size_t numpHdrs;
  if (elf_getphdrnum(e, &numpHdrs) != 0) {
    return failure;
  }

  for (size_t i = 0; i < numpHdrs; ++i) {
    GElf_Phdr pHdr;
    if (gelf_getphdr(e, i, &pHdr) != &pHdr) {
      continue;
    }
    // Look for the runtime metadata note
    if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
      // Iterate over the notes in this segment
      address ptr = (address)binary + pHdr.p_offset;
      address segmentEnd = ptr + pHdr.p_filesz;

      while (ptr < segmentEnd) {
        Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
        address name = (address)&note[1];

        if (note->n_type == 7 || note->n_type == 8) {
          return failure;
        } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
                   note->n_namesz == sizeof "AMD" &&
                   !memcmp(name, "AMD", note->n_namesz)) {
          // code object v2 uses yaml metadata, no longer supported
          return failure;
        } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
                   note->n_namesz == sizeof "AMDGPU" &&
                   !memcmp(name, "AMDGPU", note->n_namesz)) {

          // n_descsz = 485
          // value is padded to 4 byte alignment, may want to move end up to
          // match
          size_t offset = sizeof(uint32_t) * 3 /* fields */
                          + sizeof("AMDGPU")   /* name */
                          + 1 /* padding to 4 byte alignment */;

          // Including the trailing padding means both pointers are 4 bytes
          // aligned, which may be useful later.
          unsigned char *metadata_start = (unsigned char *)ptr + offset;
          unsigned char *metadata_end =
              metadata_start + core::alignUp(note->n_descsz, 4);
          return {metadata_start, metadata_end};
        }
        ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
               core::alignUp(note->n_descsz, sizeof(int));
      }
    }
  }

  return failure;
}

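// Example (sketch): given an in-memory code object image `img` of `img_size`
// bytes (illustrative names, not defined in this file), the metadata blob
// located by find_metadata() is handed to the msgpack helpers below, e.g.:
//   auto md = find_metadata(img, img_size);
//   if (md.first)
//     map_lookup_array({md.first, md.second}, "amdhsa.kernels", &kernels, &n);
// This is how get_code_object_custom_metadata() drives the parse further down.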
namespace {
int map_lookup_array(msgpack::byte_range message, const char *needle,
                     msgpack::byte_range *res, uint64_t *size) {
  unsigned count = 0;
  struct s : msgpack::functors_defaults<s> {
    s(unsigned &count, uint64_t *size) : count(count), size(size) {}
    unsigned &count;
    uint64_t *size;
    const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
      count++;
      *size = N;
      return bytes.end;
    }
  };

  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           // If the message is an array, record number of
                           // elements in *size
                           msgpack::handle_msgpack<s>(value, {count, size});
                           // return the whole array
                           *res = value;
                         }
                       });
  // Only claim success if exactly one key/array pair matched
  return count != 1;
}

int map_lookup_string(msgpack::byte_range message, const char *needle,
                      std::string *res) {
  unsigned count = 0;
  struct s : public msgpack::functors_defaults<s> {
    s(unsigned &count, std::string *res) : count(count), res(res) {}
    unsigned &count;
    std::string *res;
    void handle_string(size_t N, const unsigned char *str) {
      count++;
      *res = std::string(str, str + N);
    }
  };
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::handle_msgpack<s>(value, {count, res});
                         }
                       });
  return count != 1;
}

int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
                        uint64_t *res) {
  unsigned count = 0;
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::foronly_unsigned(value, [&](uint64_t x) {
                             count++;
                             *res = x;
                           });
                         }
                       });
  return count != 1;
}

int array_lookup_element(msgpack::byte_range message, uint64_t elt,
                         msgpack::byte_range *res) {
  int rc = 1;
  uint64_t i = 0;
  msgpack::foreach_array(message, [&](msgpack::byte_range value) {
    if (i == elt) {
      *res = value;
      rc = 0;
    }
    i++;
  });
  return rc;
}

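// populate_kernelArgMD consumes one element of the ".args" array: a msgpack
// map whose keys include ".name", ".size", ".offset" and ".value_kind". For
// instance, a hidden printf buffer argument might look like
//   {".size": 8, ".offset": 24, ".value_kind": "hidden_printf_buffer"}
// (values illustrative).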
int populate_kernelArgMD(msgpack::byte_range args_element,
                         KernelArgMD *kernelarg) {
  using namespace msgpack;
  int error = 0;
  foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
    if (message_is_string(key, ".name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->name_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".type_name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->typeName_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".size")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
    } else if (message_is_string(key, ".offset")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
    } else if (message_is_string(key, ".value_kind")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        std::string s = std::string(str, str + N);
        auto itValueKind = ArgValueKind.find(s);
        if (itValueKind != ArgValueKind.end()) {
          kernelarg->valueKind_ = itValueKind->second;
        }
      });
    }
  });
  return error;
}
} // namespace

static hsa_status_t get_code_object_custom_metadata(void *binary,
                                                    size_t binSize, int gpu) {
  // parse code object with different keys from v2
  // also, the kernel name is not the same as the symbol name -- so a
  // symbol->name map is needed

  std::pair<unsigned char *, unsigned char *> metadata =
      find_metadata(binary, binSize);
  if (!metadata.first) {
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  uint64_t kernelsSize = 0;
  int msgpack_errors = 0;
  msgpack::byte_range kernel_array;
  msgpack_errors =
      map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
                       &kernel_array, &kernelsSize);
  msgpackErrorCheck(kernels lookup in program metadata, msgpack_errors);

  for (size_t i = 0; i < kernelsSize; i++) {
    assert(msgpack_errors == 0);
    std::string kernelName;
    std::string languageName;
    std::string symbolName;

    msgpack::byte_range element;
    msgpack_errors += array_lookup_element(kernel_array, i, &element);
    msgpackErrorCheck(element lookup in kernel metadata, msgpack_errors);

    msgpack_errors += map_lookup_string(element, ".name", &kernelName);
    msgpack_errors += map_lookup_string(element, ".language", &languageName);
    msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
    msgpackErrorCheck(strings lookup in kernel metadata, msgpack_errors);

    atl_kernel_info_t info = {0, 0, 0, 0, 0, {}, {}, {}};
    size_t kernel_explicit_args_size = 0;
    uint64_t kernel_segment_size;
    msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
                                          &kernel_segment_size);
    msgpackErrorCheck(kernarg segment size metadata lookup in kernel metadata,
                      msgpack_errors);

    // create a map from symbol to name
    DEBUG_PRINT("Kernel symbol %s; Name: %s; Size: %lu\n", symbolName.c_str(),
                kernelName.c_str(), kernel_segment_size);
    KernelNameMap[symbolName] = kernelName;

    bool hasHiddenArgs = false;
    if (kernel_segment_size > 0) {
      uint64_t argsSize;
      size_t offset = 0;

      msgpack::byte_range args_array;
      msgpack_errors +=
          map_lookup_array(element, ".args", &args_array, &argsSize);
      msgpackErrorCheck(kernel args metadata lookup in kernel metadata,
                        msgpack_errors);

      info.num_args = argsSize;

      for (size_t i = 0; i < argsSize; ++i) {
        KernelArgMD lcArg;

        msgpack::byte_range args_element;
        msgpack_errors += array_lookup_element(args_array, i, &args_element);
        msgpackErrorCheck(iterate args map in kernel args metadata,
                          msgpack_errors);

        msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
        msgpackErrorCheck(iterate args map in kernel args metadata,
                          msgpack_errors);

        // TODO(ashwinma): should the below population actions be done only for
        // non-implicit args?
        // populate info with sizes and offsets
        info.arg_sizes.push_back(lcArg.size_);
        // v3 has an offset field instead of an align field
        size_t new_offset = lcArg.offset_;
        size_t padding = new_offset - offset;
        offset = new_offset;
        info.arg_offsets.push_back(lcArg.offset_);
        DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(),
                    lcArg.size_, lcArg.offset_);
        offset += lcArg.size_;

        // check if the arg is a hidden/implicit arg
        // this logic assumes that all hidden args are 8-byte aligned
        if (!isImplicit(lcArg.valueKind_)) {
          kernel_explicit_args_size += lcArg.size_;
        } else {
          hasHiddenArgs = true;
        }
        kernel_explicit_args_size += padding;
      }
    }

    // Account for implicit args (e.g. global offset x, y, z and the pipe
    // pointer): ATMI does not count the compiler-set hidden args reported in
    // the metadata; instead it appends its own implicit args
    // (atmi_implicit_args_t) after the explicit args.
    info.kernel_segment_size =
        (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
        sizeof(atmi_implicit_args_t);
    DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
                kernel_segment_size, info.kernel_segment_size);
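    // For example, if the explicit args occupy 24 bytes and the metadata
    // reports a 56-byte kernarg segment (24 explicit + 32 hidden), the entry
    // above becomes 24 + sizeof(atmi_implicit_args_t) rather than
    // 56 + sizeof(atmi_implicit_args_t).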

    // kernel received, now add it to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
  }

  return HSA_STATUS_SUCCESS;
}

static hsa_status_t populate_InfoTables(hsa_executable_t executable,
                                        hsa_executable_symbol_t symbol,
                                        void *data) {
  int gpu = *static_cast<int *>(data);
  hsa_symbol_kind_t type;

  uint32_t name_length;
  hsa_status_t err;
  err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
                                       &type);
  ErrorCheck(Symbol info extraction, err);
  DEBUG_PRINT("Exec Symbol type: %d\n", type);
  if (type == HSA_SYMBOL_KIND_KERNEL) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    ErrorCheck(Symbol info extraction, err);
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    ErrorCheck(Symbol info extraction, err);
    name[name_length] = 0;

    if (KernelNameMap.find(std::string(name)) == KernelNameMap.end()) {
      // did not find kernel name in the kernel map; this can happen only
      // if the ROCr API for getting symbol info (name) is different from
      // the comgr method of getting symbol info
      ErrorCheck(Invalid kernel name, HSA_STATUS_ERROR_INVALID_CODE_OBJECT);
    }
    atl_kernel_info_t info;
    std::string kernelName = KernelNameMap[std::string(name)];
    // by now, the kernel info table should already have an entry
    // because the non-ROCr custom code object parsing is called before
    // iterating over the code object symbols using ROCr
    if (KernelInfoTable[gpu].find(kernelName) == KernelInfoTable[gpu].end()) {
      ErrorCheck(Finding the entry kernel info table,
                 HSA_STATUS_ERROR_INVALID_CODE_OBJECT);
    }
    // found, so assign and update
    info = KernelInfoTable[gpu][kernelName];

    /* Extract dispatch information from the symbol */
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
        &(info.kernel_object));
    ErrorCheck(Extracting the symbol from the executable, err);
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
        &(info.group_segment_size));
    ErrorCheck(Extracting the group segment size from the executable, err);
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
        &(info.private_segment_size));
    ErrorCheck(Extracting the private segment from the executable, err);

    DEBUG_PRINT(
        "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
        "kernarg\n",
        kernelName.c_str(), info.kernel_object, info.group_segment_size,
        info.private_segment_size, info.kernel_segment_size);

    // assign it back to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
    free(name);
  } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    ErrorCheck(Symbol info extraction, err);
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    ErrorCheck(Symbol info extraction, err);
    name[name_length] = 0;

    atl_symbol_info_t info;

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
    ErrorCheck(Symbol info address extraction, err);

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
    ErrorCheck(Symbol info size extraction, err);

    atmi_mem_place_t place = ATMI_MEM_PLACE(ATMI_DEVTYPE_GPU, gpu, 0);
    DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
                info.size);
    register_allocation(reinterpret_cast<void *>(info.addr), (size_t)info.size,
                        place);
    SymbolInfoTable[gpu][std::string(name)] = info;
    if (strcmp(name, "needs_hostcall_buffer") == 0)
      g_atmi_hostcall_required = true;
    free(name);
  } else {
    DEBUG_PRINT("Symbol is an indirect function\n");
  }
  return HSA_STATUS_SUCCESS;
}

atmi_status_t Runtime::RegisterModuleFromMemory(
    void *module_bytes, size_t module_size, atmi_place_t place,
    atmi_status_t (*on_deserialized_data)(void *data, size_t size,
                                          void *cb_state),
    void *cb_state) {
  hsa_status_t err;
  int gpu = place.device_id;
  assert(gpu >= 0);

  DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu);
  ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
  hsa_agent_t agent = proc.agent();
  hsa_executable_t executable = {0};
  hsa_profile_t agent_profile;

  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
  ErrorCheck(Query the agent profile, err);
  // FIXME: Assume that every profile is FULL until we understand how to build
  // GCN with base profile
  agent_profile = HSA_PROFILE_FULL;
  /* Create the empty executable. */
  err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
                              &executable);
  ErrorCheck(Create the executable, err);

  bool module_load_success = false;
  do // Existing control flow used continue, preserve that for this patch
  {
    {
      // Some metadata info is not available through the ROCr API, so use
      // custom code object metadata parsing to collect it

      err = get_code_object_custom_metadata(module_bytes, module_size, gpu);
      ErrorCheckAndContinue(Getting custom code object metadata, err);

      // Deserialize code object.
      hsa_code_object_t code_object = {0};
      err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
                                        &code_object);
      ErrorCheckAndContinue(Code Object Deserialization, err);
      assert(0 != code_object.handle);

      // Mutating the device image here avoids another allocation & memcpy
      void *code_object_alloc_data =
          reinterpret_cast<void *>(code_object.handle);
      atmi_status_t atmi_err =
          on_deserialized_data(code_object_alloc_data, module_size, cb_state);
      ATMIErrorCheck(Error in deserialized_data callback, atmi_err);

      /* Load the code object. */
      err =
          hsa_executable_load_code_object(executable, agent, code_object, NULL);
      ErrorCheckAndContinue(Loading the code object, err);

      // cannot iterate over symbols until the executable is frozen
    }
    module_load_success = true;
  } while (0);
  DEBUG_PRINT("Module loaded successfully? %d\n", module_load_success);
  if (module_load_success) {
    /* Freeze the executable; it can now be queried for symbols. */
    err = hsa_executable_freeze(executable, "");
    ErrorCheck(Freeze the executable, err);

    err = hsa_executable_iterate_symbols(executable, populate_InfoTables,
                                         static_cast<void *>(&gpu));
    ErrorCheck(Iterating over symbols for executable, err);

    // save the executable and destroy it during finalize
    g_executables.push_back(executable);
    return ATMI_STATUS_SUCCESS;
  } else {
    return ATMI_STATUS_ERROR;
  }
}

} // namespace core