
Lines Matching +full:0 +full:- +full:dev

5  * SPDX-License-Identifier: MIT
25 #include "drm-uapi/dma-buf.h"
28 #include "util/mesa-sha1.h"
40 asahi_simple_ioctl(struct agx_device *dev, unsigned cmd, void *req)
42 if (dev->is_virtio) {
43 return agx_virtio_simple_ioctl(dev, cmd, req);
45 return drmIoctl(dev->fd, cmd, req);
49 /* clang-format off */
53 {"no16", AGX_DBG_NO16, "Disable 16-bit support"},
58 {"precompile",AGX_DBG_PRECOMPILE,"Precompile shaders for shader-db"},
65 {"nowc", AGX_DBG_NOWC, "Disable write-combining"},
77 /* clang-format on */
80 agx_bo_free(struct agx_device *dev, struct agx_bo *bo)
82 const uint64_t handle = bo->handle;
84 if (bo->_map)
85 munmap(bo->_map, bo->size);
90 agx_va_free(dev, bo->va);
92 if (bo->prime_fd != -1)
93 close(bo->prime_fd);
97 memset(bo, 0, sizeof(*bo));
101 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
105 agx_bo_bind(struct agx_device *dev, struct agx_bo *bo, uint64_t addr,
111 .handle = bo->handle,
112 .vm_id = dev->vm_id,
118 int ret = drmIoctl(dev->fd, DRM_IOCTL_ASAHI_GEM_BIND, &gem_bind);
121 bo->handle);
128 agx_bo_alloc(struct agx_device *dev, size_t size, size_t align,
132 unsigned handle = 0;
144 gem_create.vm_id = dev->vm_id;
147 int ret = drmIoctl(dev->fd, DRM_IOCTL_ASAHI_GEM_CREATE, &gem_create);
155 pthread_mutex_lock(&dev->bo_map_lock);
156 bo = agx_lookup_bo(dev, handle);
157 dev->max_handle = MAX2(dev->max_handle, handle);
158 pthread_mutex_unlock(&dev->bo_map_lock);
163 bo->dev = dev;
164 bo->size = gem_create.size;
165 bo->align = align;
166 bo->flags = flags;
167 bo->handle = handle;
168 bo->prime_fd = -1;
170 enum agx_va_flags va_flags = flags & AGX_BO_LOW_VA ? AGX_VA_USC : 0;
171 bo->va = agx_va_alloc(dev, size, bo->align, va_flags, 0);
172 if (!bo->va) {
174 agx_bo_free(dev, bo);
183 ret = dev->ops.bo_bind(dev, bo, bo->va->addr, bo->size, 0, bind, false);
185 agx_bo_free(dev, bo);
193 agx_bo_mmap(struct agx_device *dev, struct agx_bo *bo)
195 assert(bo->_map == NULL && "not double mapped");
197 struct drm_asahi_gem_mmap_offset gem_mmap_offset = {.handle = bo->handle};
200 ret = drmIoctl(dev->fd, DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET, &gem_mmap_offset);
203 assert(0);
206 bo->_map = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
207 dev->fd, gem_mmap_offset.offset);
208 if (bo->_map == MAP_FAILED) {
209 bo->_map = NULL;
211 "mmap failed: result=%p size=0x%llx fd=%i offset=0x%llx %m\n",
212 bo->_map, (long long)bo->size, dev->fd,
218 agx_bo_import(struct agx_device *dev, int fd)
224 pthread_mutex_lock(&dev->bo_map_lock);
226 ret = drmPrimeFDToHandle(dev->fd, fd, &gem_handle);
229 pthread_mutex_unlock(&dev->bo_map_lock);
233 bo = agx_lookup_bo(dev, gem_handle);
234 dev->max_handle = MAX2(dev->max_handle, gem_handle);
236 if (!bo->size) {
237 bo->dev = dev;
238 bo->size = lseek(fd, 0, SEEK_END);
239 bo->align = dev->params.vm_page_size;
241 /* Sometimes this can fail and return -1. size of -1 is not
245 if ((bo->size == 0) || (bo->size == (size_t)-1)) {
246 pthread_mutex_unlock(&dev->bo_map_lock);
249 if (bo->size & (dev->params.vm_page_size - 1)) {
252 "import failed: BO is not a multiple of the page size (0x%llx bytes)\n",
253 (long long)bo->size);
257 bo->flags = AGX_BO_SHARED | AGX_BO_SHAREABLE;
258 bo->handle = gem_handle;
259 bo->prime_fd = os_dupfd_cloexec(fd);
260 bo->label = "Imported BO";
261 assert(bo->prime_fd >= 0);
263 p_atomic_set(&bo->refcnt, 1);
264 bo->va = agx_va_alloc(dev, bo->size, bo->align, 0, 0);
266 if (!bo->va) {
269 "import failed: Could not allocate from VMA heap (0x%llx bytes)\n",
270 (long long)bo->size);
274 if (dev->is_virtio) {
275 bo->vbo_res_id = vdrm_handle_to_res_id(dev->vdrm, bo->handle);
278 ret = dev->ops.bo_bind(dev, bo, bo->va->addr, bo->size, 0,
281 fprintf(stderr, "import failed: Could not bind BO at 0x%llx\n",
282 (long long)bo->va->addr);
286 /* bo->refcnt == 0 can happen if the BO
289 * is 0 and we can't use agx_bo_reference() directly, we
290 * have to re-initialize the refcnt().
296 if (p_atomic_read(&bo->refcnt) == 0)
297 p_atomic_set(&bo->refcnt, 1);
301 pthread_mutex_unlock(&dev->bo_map_lock);
303 assert(bo->dev != NULL && "post-condition");
305 if (dev->debug & AGX_DBG_TRACE) {
307 agxdecode_track_alloc(dev->agxdecode, bo);
313 memset(bo, 0, sizeof(*bo));
314 pthread_mutex_unlock(&dev->bo_map_lock);
319 agx_bo_export(struct agx_device *dev, struct agx_bo *bo)
323 assert(bo->flags & AGX_BO_SHAREABLE);
325 if (drmPrimeHandleToFD(dev->fd, bo->handle, DRM_CLOEXEC, &fd))
326 return -1;
328 if (!(bo->flags & AGX_BO_SHARED)) {
329 bo->flags |= AGX_BO_SHARED;
330 assert(bo->prime_fd == -1);
331 bo->prime_fd = os_dupfd_cloexec(fd);
336 uint64_t writer = p_atomic_read_relaxed(&bo->writer);
338 int out_sync_fd = -1;
340 dev->fd, agx_bo_writer_syncobj(writer), &out_sync_fd);
341 assert(ret >= 0);
342 assert(out_sync_fd >= 0);
344 ret = agx_import_sync_file(dev, bo, out_sync_fd);
345 assert(ret >= 0);
350 assert(bo->prime_fd >= 0);
355 agx_bo_bind_object(struct agx_device *dev, struct agx_bo *bo,
362 .handle = bo->handle,
363 .vm_id = 0,
368 int ret = drmIoctl(dev->fd, DRM_IOCTL_ASAHI_GEM_BIND_OBJECT, &gem_bind);
372 bo->handle);
381 agx_bo_unbind_object(struct agx_device *dev, uint32_t object_handle,
390 int ret = drmIoctl(dev->fd, DRM_IOCTL_ASAHI_GEM_BIND_OBJECT, &gem_bind);
401 agx_get_global_ids(struct agx_device *dev)
403 dev->next_global_id = 0;
404 dev->last_global_id = 0x1000000;
408 agx_get_global_id(struct agx_device *dev)
410 if (unlikely(dev->next_global_id >= dev->last_global_id)) {
411 agx_get_global_ids(dev);
414 return dev->next_global_id++;
418 agx_get_params(struct agx_device *dev, void *buf, size_t size)
421 .param_group = 0,
426 memset(buf, 0, size);
428 int ret = drmIoctl(dev->fd, DRM_IOCTL_ASAHI_GET_PARAMS, &get_param);
431 return -EINVAL;
438 agx_submit(struct agx_device *dev, struct drm_asahi_submit *submit,
441 return drmIoctl(dev->fd, DRM_IOCTL_ASAHI_SUBMIT, submit);
457 while (n != 0) {
467 agx_init_timestamps(struct agx_device *dev)
469 uint64_t ts_gcd = gcd(dev->params.timer_frequency_hz, NSEC_PER_SEC);
471 dev->timestamp_to_ns.num = NSEC_PER_SEC / ts_gcd;
472 dev->timestamp_to_ns.den = dev->params.timer_frequency_hz / ts_gcd;
474 uint64_t user_ts_gcd = gcd(dev->params.user_timestamp_frequency_hz, NSEC_PER_SEC);
476 dev->user_timestamp_to_ns.num = NSEC_PER_SEC / user_ts_gcd;
477 dev->user_timestamp_to_ns.den =
478 dev->params.user_timestamp_frequency_hz / user_ts_gcd;
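
Only the loop header of the gcd() helper and the reduced num/den assignments appear in the matched lines above. As a rough illustration of the rational scaling this sets up, here is a self-contained sketch (hypothetical names, not part of the file) that reduces NSEC_PER_SEC and the timer frequency by their greatest common divisor and then converts ticks to nanoseconds; the 24 MHz figure is only an example value:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ull

/* Euclidean GCD; the file's gcd() helper presumably does the same. */
static uint64_t example_gcd(uint64_t m, uint64_t n)
{
   while (n != 0) {
      uint64_t r = m % n;
      m = n;
      n = r;
   }
   return m;
}

/* Convert timer ticks to nanoseconds with the reduced num/den pair.
 * For a 24 MHz timer: gcd(1e9, 24e6) = 8e6, so num = 125 and den = 3. */
static uint64_t example_ticks_to_ns(uint64_t ticks, uint64_t timer_hz)
{
   uint64_t g = example_gcd(timer_hz, NSEC_PER_SEC);
   uint64_t num = NSEC_PER_SEC / g;
   uint64_t den = timer_hz / g;
   return ticks * num / den;
}
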
482 agx_open_device(void *memctx, struct agx_device *dev)
484 dev->debug =
485 debug_get_flags_option("ASAHI_MESA_DEBUG", agx_debug_options, 0);
487 dev->ops = agx_device_drm_ops;
489 ssize_t params_size = -1;
493 drmVersionPtr version = drmGetVersion(dev->fd);
499 if (!strcmp(version->name, "asahi")) {
500 dev->is_virtio = false;
501 dev->ops = agx_device_drm_ops;
502 } else if (!strcmp(version->name, "virtio_gpu")) {
503 dev->is_virtio = true;
504 if (!agx_virtio_open_device(dev)) {
507 "Error opening virtio-gpu device for Asahi native context\n");
517 params_size = dev->ops.get_params(dev, &dev->params, sizeof(dev->params));
518 if (params_size <= 0) {
519 assert(0);
522 assert(params_size >= sizeof(dev->params));
525 if (dev->params.unstable_uabi_version != DRM_ASAHI_UNSTABLE_UABI_VERSION) {
557 dev->params.feat_incompat & (~AGX_SUPPORTED_INCOMPAT_FEATURES);
559 fprintf(stderr, "Missing GPU incompat features: 0x%" PRIx64 "\n",
561 assert(0);
565 assert(dev->params.gpu_generation >= 13);
567 switch (dev->params.gpu_variant) {
581 snprintf(dev->name, sizeof(dev->name), "Apple M%d%s (G%d%c %02X)",
582 dev->params.gpu_generation - 12, variant,
583 dev->params.gpu_generation, dev->params.gpu_variant,
584 dev->params.gpu_revision + 0xA0);
588 * address is zero, 36-bits is therefore enough to trap any zero-extended
589 * 32-bit index. For more generality we would need a larger carveout, but
602 dev->guard_size = dev->params.vm_page_size;
603 if (dev->params.vm_usc_start) {
604 dev->shader_base = dev->params.vm_usc_start;
607 dev->shader_base = ALIGN_POT(MAX2(dev->params.vm_user_start, reservation),
608 0x100000000ull);
611 if (dev->shader_base < reservation) {
614 assert(0);
618 uint64_t shader_size = 0x100000000ull;
620 uint64_t user_start = dev->shader_base + shader_size;
622 assert(dev->shader_base >= dev->params.vm_user_start);
623 assert(user_start < dev->params.vm_user_end);
625 dev->agxdecode = agxdecode_new_context(dev->shader_base);
627 agx_init_timestamps(dev);
629 util_sparse_array_init(&dev->bo_map, sizeof(struct agx_bo), 512);
630 pthread_mutex_init(&dev->bo_map_lock, NULL);
632 simple_mtx_init(&dev->bo_cache.lock, mtx_plain);
633 list_inithead(&dev->bo_cache.lru);
635 for (unsigned i = 0; i < ARRAY_SIZE(dev->bo_cache.buckets); ++i)
636 list_inithead(&dev->bo_cache.buckets[i]);
641 uint64_t kernel_size = MAX2(dev->params.vm_kernel_min_size, 32ull << 30);
643 .kernel_start = dev->params.vm_user_end - kernel_size,
644 .kernel_end = dev->params.vm_user_end,
647 uint64_t user_size = vm_create.kernel_start - user_start;
649 int ret = asahi_simple_ioctl(dev, DRM_IOCTL_ASAHI_VM_CREATE, &vm_create);
652 assert(0);
656 simple_mtx_init(&dev->vma_lock, mtx_plain);
657 util_vma_heap_init(&dev->main_heap, user_start, user_size);
658 util_vma_heap_init(&dev->usc_heap, dev->shader_base, shader_size);
660 dev->vm_id = vm_create.vm_id;
662 agx_get_global_ids(dev);
667 dev->libagx = nir_deserialize(memctx, &agx_nir_options, &blob);
669 if (agx_gather_device_key(dev).needs_g13x_coherency == U_TRISTATE_YES) {
670 dev->libagx_programs = libagx_g13x;
672 dev->libagx_programs = libagx_g13g;
675 if (dev->params.gpu_generation >= 14 && dev->params.num_clusters_total > 1) {
676 dev->chip = AGX_CHIP_G14X;
677 } else if (dev->params.gpu_generation >= 14) {
678 dev->chip = AGX_CHIP_G14G;
679 } else if (dev->params.gpu_generation >= 13 &&
680 dev->params.num_clusters_total > 1) {
681 dev->chip = AGX_CHIP_G13X;
683 dev->chip = AGX_CHIP_G13G;
686 void *bo = agx_bo_create(dev, LIBAGX_PRINTF_BUFFER_SIZE, 0, AGX_BO_WRITEBACK,
689 ret = dev->ops.bo_bind(dev, bo, LIBAGX_PRINTF_BUFFER_ADDRESS,
690 LIBAGX_PRINTF_BUFFER_SIZE, 0,
697 u_printf_init(&dev->printf, bo, agx_bo_map(bo));
699 u_printf_singleton_add(dev->libagx->printf_info,
700 dev->libagx->printf_info_count);
705 agx_close_device(struct agx_device *dev)
707 agx_bo_unreference(dev, dev->printf.bo);
708 u_printf_destroy(&dev->printf);
709 ralloc_free((void *)dev->libagx);
710 agx_bo_cache_evict_all(dev);
711 util_sparse_array_finish(&dev->bo_map);
712 agxdecode_destroy_context(dev->agxdecode);
714 util_vma_heap_finish(&dev->main_heap);
715 util_vma_heap_finish(&dev->usc_heap);
719 close(dev->fd);
723 agx_create_command_queue(struct agx_device *dev, uint32_t caps,
727 if (dev->debug & AGX_DBG_1QUEUE) {
729 simple_mtx_lock(&dev->vma_lock);
730 if (dev->queue_id) {
731 simple_mtx_unlock(&dev->vma_lock);
732 return dev->queue_id;
737 .vm_id = dev->vm_id,
740 .flags = 0,
744 asahi_simple_ioctl(dev, DRM_IOCTL_ASAHI_QUEUE_CREATE, &queue_create);
747 assert(0);
750 if (dev->debug & AGX_DBG_1QUEUE) {
751 dev->queue_id = queue_create.queue_id;
752 simple_mtx_unlock(&dev->vma_lock);
759 agx_destroy_command_queue(struct agx_device *dev, uint32_t queue_id)
761 if (dev->debug & AGX_DBG_1QUEUE)
762 return 0;
768 return asahi_simple_ioctl(dev, DRM_IOCTL_ASAHI_QUEUE_DESTROY,
773 agx_import_sync_file(struct agx_device *dev, struct agx_bo *bo, int fd)
780 assert(fd >= 0);
781 assert(bo->prime_fd != -1);
783 int ret = drmIoctl(bo->prime_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE,
785 assert(ret >= 0);
791 agx_export_sync_file(struct agx_device *dev, struct agx_bo *bo)
795 .fd = -1,
798 assert(bo->prime_fd != -1);
800 int ret = drmIoctl(bo->prime_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE,
802 assert(ret >= 0);
803 assert(export_sync_file_ioctl.fd >= 0);
805 return ret >= 0 ? export_sync_file_ioctl.fd : ret;
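
Both helpers issue the generic dma-buf sync-file ioctls on the BO's cached prime_fd, which is how implicit-sync fences attached to a shared buffer are bridged to and from explicit sync objects. A minimal standalone sketch of the export direction, using only the kernel's dma-buf UAPI (the function name is hypothetical, and DMA_BUF_IOCTL_EXPORT_SYNC_FILE needs a reasonably recent kernel header):

#include <linux/dma-buf.h>
#include <sys/ioctl.h>

/* Export the fences currently attached to a dma-buf as a sync-file fd.
 * Returns the new fd, or -1 on error. Illustrative only. */
static int example_export_sync_file(int dmabuf_fd)
{
   struct dma_buf_export_sync_file args = {
      .flags = DMA_BUF_SYNC_RW, /* wait for both readers and writers */
      .fd = -1,
   };

   if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &args) < 0)
      return -1;

   return args.fd; /* caller owns and must close this sync-file fd */
}
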
809 agx_debug_fault(struct agx_device *dev, uint64_t addr)
811 pthread_mutex_lock(&dev->bo_map_lock);
815 for (uint32_t handle = 0; handle < dev->max_handle; handle++) {
816 struct agx_bo *bo = agx_lookup_bo(dev, handle);
817 if (!bo->va)
820 uint64_t bo_addr = bo->va->addr;
821 if (bo->flags & AGX_BO_LOW_VA)
822 bo_addr += dev->shader_base;
824 if (!bo->size || bo_addr > addr)
827 if (!best || bo_addr > best->va->addr)
832 mesa_logw("Address 0x%" PRIx64 " is unknown\n", addr);
834 uint64_t start = best->va->addr;
835 uint64_t end = best->va->addr + best->size;
838 mesa_logw("Address 0x%" PRIx64 " is unknown\n", addr);
840 mesa_logw("Address 0x%" PRIx64 " is 0x%" PRIx64
841 " bytes beyond an object at 0x%" PRIx64 "..0x%" PRIx64
843 addr, addr - end, start, end - 1, best->label);
845 mesa_logw("Address 0x%" PRIx64 " is 0x%" PRIx64
846 " bytes into an object at 0x%" PRIx64 "..0x%" PRIx64
848 addr, addr - start, start, end - 1, best->label);
852 pthread_mutex_unlock(&dev->bo_map_lock);
856 agx_get_gpu_timestamp(struct agx_device *dev)
858 if (dev->params.feat_compat & DRM_ASAHI_FEAT_GETTIME) {
859 struct drm_asahi_get_time get_time = {.flags = 0, .extensions = 0};
861 int ret = asahi_simple_ioctl(dev, DRM_IOCTL_ASAHI_GET_TIME, &get_time);
870 __asm__ volatile("mrs \t%0, cntvct_el0" : "=r"(ret));
886 agx_get_device_uuid(const struct agx_device *dev, void *uuid)
898 _mesa_sha1_update(&sha1_ctx, &dev->params.gpu_generation,
899 sizeof(dev->params.gpu_generation));
900 _mesa_sha1_update(&sha1_ctx, &dev->params.gpu_variant,
901 sizeof(dev->params.gpu_variant));
902 _mesa_sha1_update(&sha1_ctx, &dev->params.gpu_revision,
903 sizeof(dev->params.gpu_revision));
936 agx_get_num_cores(const struct agx_device *dev)
938 unsigned n = 0;
940 for (unsigned cl = 0; cl < dev->params.num_clusters_total; cl++) {
941 n += util_bitcount(dev->params.core_masks[cl]);
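
util_bitcount() is Mesa's population-count helper, so the core count is just the sum of set bits across every cluster's core mask. A standalone equivalent with made-up mask values (hypothetical names, not part of the file):

#include <stdint.h>

/* Sum the set bits of each cluster's core mask to get the core count. */
static unsigned example_num_cores(const uint32_t *core_masks,
                                  unsigned num_clusters)
{
   unsigned n = 0;
   for (unsigned cl = 0; cl < num_clusters; cl++)
      n += __builtin_popcount(core_masks[cl]);
   return n;
}

/* Example: two clusters with 8 cores enabled each. */
/* uint32_t masks[2] = {0xff, 0xff};  example_num_cores(masks, 2) == 16 */
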
948 agx_gather_device_key(struct agx_device *dev)
950 bool g13x_coh = (dev->params.gpu_generation == 13 &&
951 dev->params.num_clusters_total > 1) ||
952 dev->params.num_dies > 1;
956 .soft_fault = agx_has_soft_fault(dev),