Lines Matching refs:hdev
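
The entries below are a cross-reference of every line that touches hdev in what appears, from the function names, to be the habanalabs driver's command-buffer code (command_buffer.c). Each entry gives the source line number, the matching code, the enclosing function, and whether hdev is a local variable or an argument there.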

18 struct hl_device *hdev = ctx->hdev; in cb_map_mem() local
19 struct asic_fixed_properties *prop = &hdev->asic_prop; in cb_map_mem()
27 if (!hdev->supports_cb_mapping) { in cb_map_mem()
28 dev_err_ratelimited(hdev->dev, in cb_map_mem()
33 if (!hdev->mmu_enable) { in cb_map_mem()
34 dev_err_ratelimited(hdev->dev, in cb_map_mem()
47 dev_err(hdev->dev, in cb_map_mem()
75 dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", in cb_map_mem()
84 hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR); in cb_map_mem()
101 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); in cb_map_mem()
117 struct hl_device *hdev = ctx->hdev; in cb_unmap_mem() local
126 dev_warn_ratelimited(hdev->dev, in cb_unmap_mem()
130 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); in cb_unmap_mem()
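
The cb_map_mem()/cb_unmap_mem() pair above (lines 18-130) shows the driver's MMU cache-maintenance pattern: a successful map ends with a soft invalidate (second argument false at line 84), while cb_unmap_mem() and the map's error path do a hard one (true at lines 101 and 130). A minimal sketch of that shape, with map_page()/unmap_page() as hypothetical stand-ins for the driver's per-page MMU helpers:

static int map_cb_pages(struct hl_ctx *ctx, u64 va, u64 pa, u32 npages)
{
    struct hl_device *hdev = ctx->hdev;
    u32 i;
    int rc;

    for (i = 0; i < npages; i++) {
        /* map_page() is a hypothetical stand-in, not a driver symbol */
        rc = map_page(ctx, va + i * PAGE_SIZE, pa + i * PAGE_SIZE);
        if (rc)
            goto unmap;
    }

    /* success path: soft invalidate, as at line 84 */
    hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
    return 0;

unmap:
    while (i--)
        unmap_page(ctx, va + i * PAGE_SIZE);

    /* error path: hard invalidate, as at line 101 */
    hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
    return rc;
}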
141 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) in cb_fini() argument
144 gen_pool_free(hdev->internal_cb_pool, in cb_fini()
147 hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size, in cb_fini()
153 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) in cb_do_release() argument
156 spin_lock(&hdev->cb_pool_lock); in cb_do_release()
157 list_add(&cb->pool_list, &hdev->cb_pool); in cb_do_release()
158 spin_unlock(&hdev->cb_pool_lock); in cb_do_release()
160 cb_fini(hdev, cb); in cb_do_release()
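
cb_do_release() (lines 153-160) picks between the two teardown paths visible above: pooled CBs are pushed back onto hdev->cb_pool under cb_pool_lock, everything else goes through cb_fini(). Reconstructed from those fragments (the is_pool flag is an assumption):

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
    if (cb->is_pool) {      /* is_pool: assumed per-CB flag */
        spin_lock(&hdev->cb_pool_lock);
        list_add(&cb->pool_list, &hdev->cb_pool);
        spin_unlock(&hdev->cb_pool_lock);
    } else {
        cb_fini(hdev, cb);
    }
}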
166 struct hl_device *hdev; in cb_release() local
170 hdev = cb->hdev; in cb_release()
179 cb_do_release(hdev, cb); in cb_release()
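
cb_release() (lines 166-179) has the shape of a kref release callback: the final reference drop funnels into cb_do_release(). A minimal sketch of that lifetime scheme, assuming hl_cb embeds a struct kref named refcount:

static void cb_release(struct kref *ref)
{
    struct hl_cb *cb = container_of(ref, struct hl_cb, refcount);

    cb_do_release(cb->hdev, cb);
}

/*
 * Usage under this scheme:
 *   kref_init(&cb->refcount);             at creation
 *   kref_get(&cb->refcount);              in hl_cb_get()
 *   kref_put(&cb->refcount, cb_release);  when a user is done
 */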
182 static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size, in hl_cb_alloc() argument
206 p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size); in hl_cb_alloc()
212 cb_offset = p - hdev->internal_cb_pool_virt_addr; in hl_cb_alloc()
214 cb->bus_address = hdev->internal_cb_va_base + cb_offset; in hl_cb_alloc()
216 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size, in hl_cb_alloc()
219 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size, in hl_cb_alloc()
225 dev_err(hdev->dev, in hl_cb_alloc()
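
hl_cb_alloc() (lines 182-225) has two backing stores. Internal CBs are carved out of a genalloc pool shadowing a device-reserved region, with the bus address recomputed from the pool offset (lines 206-214); ordinary CBs come from coherent DMA memory through the ASIC ops, apparently with a retry under different GFP flags (lines 216-219). A sketch of the split, with the GFP flags on the DMA path being a guess:

if (internal_cb) {
    p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
    if (p) {
        /* pool offset -> device VA, as at lines 212-214 */
        cb_offset = p - hdev->internal_cb_pool_virt_addr;
        cb->bus_address = hdev->internal_cb_va_base + cb_offset;
    }
} else {
    /* GFP flags are an assumption, not taken from the listing */
    p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
                    &cb->bus_address, GFP_USER | __GFP_ZERO);
}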
238 int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, in hl_cb_create() argument
250 if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) && in hl_cb_create()
252 dev_warn_ratelimited(hdev->dev, in hl_cb_create()
259 dev_err(hdev->dev, "CB size %d must be less than %d\n", in hl_cb_create()
271 cb_size <= hdev->asic_prop.cb_pool_cb_size) { in hl_cb_create()
273 spin_lock(&hdev->cb_pool_lock); in hl_cb_create()
274 if (!list_empty(&hdev->cb_pool)) { in hl_cb_create()
275 cb = list_first_entry(&hdev->cb_pool, in hl_cb_create()
278 spin_unlock(&hdev->cb_pool_lock); in hl_cb_create()
281 spin_unlock(&hdev->cb_pool_lock); in hl_cb_create()
282 dev_dbg(hdev->dev, "CB pool is empty\n"); in hl_cb_create()
288 cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb); in hl_cb_create()
295 cb->hdev = hdev; in hl_cb_create()
297 hl_ctx_get(hdev, cb->ctx); in hl_cb_create()
301 dev_err(hdev->dev, in hl_cb_create()
317 dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n"); in hl_cb_create()
342 cb_do_release(hdev, cb); in hl_cb_create()
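
hl_cb_create() (lines 238-342) first rejects requests while the device is disabled or mid-reset and enforces the size cap, then tries to recycle a pooled CB before allocating a fresh one (lines 271-288). The recycle-or-allocate step, reconstructed from the fragments (the alloc_new_cb flag and the kernel-ASID condition are assumptions):

if (ctx_id == HL_KERNEL_ASID_ID &&          /* assumed condition */
        cb_size <= hdev->asic_prop.cb_pool_cb_size) {
    spin_lock(&hdev->cb_pool_lock);
    if (!list_empty(&hdev->cb_pool)) {
        cb = list_first_entry(&hdev->cb_pool, struct hl_cb,
                pool_list);
        list_del(&cb->pool_list);
        spin_unlock(&hdev->cb_pool_lock);
        alloc_new_cb = false;
    } else {
        spin_unlock(&hdev->cb_pool_lock);
        dev_dbg(hdev->dev, "CB pool is empty\n");
    }
}

if (alloc_new_cb) {
    cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
    if (!cb)
        return -ENOMEM;     /* real error path likely unwinds more */
}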
349 int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) in hl_cb_destroy() argument
371 dev_err(hdev->dev, in hl_cb_destroy()
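
hl_cb_destroy() (lines 349-371) is the mirror image: look the handle up in the manager's IDR, remove it, and drop the creation reference so cb_release() fires once all users are gone. A sketch of that lookup-and-put, assuming the manager wraps an IDR (cb_handles) guarded by a cb_lock spinlock:

cb_handle >>= PAGE_SHIFT;   /* undo the mmap-offset encoding */

spin_lock(&mgr->cb_lock);
cb = idr_find(&mgr->cb_handles, cb_handle);
if (cb)
    idr_remove(&mgr->cb_handles, cb_handle);
spin_unlock(&mgr->cb_lock);

if (!cb) {
    dev_err(hdev->dev, "CB destroy failed, no match to handle\n");
    return -EINVAL;
}

kref_put(&cb->refcount, cb_release);
return 0;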
382 struct hl_device *hdev = hpriv->hdev; in hl_cb_ioctl() local
386 if (hl_device_disabled_or_in_reset(hdev)) { in hl_cb_ioctl()
387 dev_warn_ratelimited(hdev->dev, in hl_cb_ioctl()
389 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); in hl_cb_ioctl()
396 dev_err(hdev->dev, in hl_cb_ioctl()
401 rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx, in hl_cb_ioctl()
412 rc = hl_cb_destroy(hdev, &hpriv->cb_mgr, in hl_cb_ioctl()
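
hl_cb_ioctl() (lines 382-412) gates on device state and then dispatches on the requested operation. A sketch of the dispatch, assuming the HL_CB_OP_CREATE/HL_CB_OP_DESTROY op codes and union hl_cb_args layout from the habanalabs uapi header:

union hl_cb_args *args = data;
u64 handle = 0;
int rc;

switch (args->in.op) {
case HL_CB_OP_CREATE:
    /* exact argument list varies across kernel versions */
    rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
                args->in.cb_size, false, &handle);
    memset(args, 0, sizeof(*args));
    args->out.cb_handle = handle;
    break;

case HL_CB_OP_DESTROY:
    rc = hl_cb_destroy(hdev, &hpriv->cb_mgr, args->in.cb_handle);
    break;

default:
    rc = -EINVAL;
    break;
}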
450 struct hl_device *hdev = hpriv->hdev; in hl_cb_mmap() local
462 cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle); in hl_cb_mmap()
464 dev_err(hdev->dev, in hl_cb_mmap()
472 dev_err(hdev->dev, in hl_cb_mmap()
481 dev_err(hdev->dev, in hl_cb_mmap()
492 dev_err(hdev->dev, in hl_cb_mmap()
511 rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address, in hl_cb_mmap()
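
hl_cb_mmap() (lines 450-511) decodes the CB handle from the mmap offset, takes a reference via hl_cb_get(), validates the requested span against the CB, and finally defers the actual mapping to the ASIC-specific cb_mmap op. The shape of that validation (the exact checks, the cb_mmap argument list, and the hl_cb_put() helper are assumptions):

handle = vma->vm_pgoff;     /* CB handle rides in the mmap offset */

cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
if (!cb) {
    dev_err(hdev->dev, "CB mmap failed, no match to handle\n");
    return -EINVAL;
}

/* requested span must cover exactly the (page-aligned) CB */
if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
    dev_err(hdev->dev, "CB mmap size mismatch\n");
    rc = -EINVAL;
    goto put_cb;
}

rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
                cb->bus_address, cb->size);

put_cb:
    hl_cb_put(cb);          /* assumed kref_put wrapper */
    return rc;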
530 struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, in hl_cb_get() argument
540 dev_warn(hdev->dev, in hl_cb_get()
564 void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr) in hl_cb_mgr_fini() argument
574 dev_err(hdev->dev, in hl_cb_mgr_fini()
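
hl_cb_mgr_fini() (lines 564-574) sweeps whatever handles remain in a process's manager at teardown, warning about CBs that are still alive. A sketch of that sweep, assuming idr_for_each_entry() over mgr->cb_handles:

struct hl_cb *cb;
int id;

idr_for_each_entry(&mgr->cb_handles, cb, id) {
    if (kref_put(&cb->refcount, cb_release) != 1)
        dev_err(hdev->dev, "CB %d is still alive\n", id);
}

idr_destroy(&mgr->cb_handles);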
582 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, in hl_cb_kernel_create() argument
589 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size, in hl_cb_kernel_create()
592 dev_err(hdev->dev, in hl_cb_kernel_create()
598 cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); in hl_cb_kernel_create()
607 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT); in hl_cb_kernel_create()
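
hl_cb_kernel_create() (lines 582-607) exposes the handle encoding: hl_cb_create() returns the ID pre-shifted by PAGE_SHIFT so user space can pass it straight to mmap() as an offset. The kernel-side helper therefore shifts it back down before the lookup at line 598 and re-encodes it for hl_cb_destroy() at line 607:

cb_handle >>= PAGE_SHIFT;   /* undo the mmap-offset encoding */
cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
if (!cb) {
    /* re-encode for the destroy path, as at line 607 */
    hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);
    return NULL;
}

return cb;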
612 int hl_cb_pool_init(struct hl_device *hdev) in hl_cb_pool_init() argument
617 INIT_LIST_HEAD(&hdev->cb_pool); in hl_cb_pool_init()
618 spin_lock_init(&hdev->cb_pool_lock); in hl_cb_pool_init()
620 for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) { in hl_cb_pool_init()
621 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, in hl_cb_pool_init()
625 list_add(&cb->pool_list, &hdev->cb_pool); in hl_cb_pool_init()
627 hl_cb_pool_fini(hdev); in hl_cb_pool_init()
635 int hl_cb_pool_fini(struct hl_device *hdev) in hl_cb_pool_fini() argument
639 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { in hl_cb_pool_fini()
641 cb_fini(hdev, cb); in hl_cb_pool_fini()
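
hl_cb_pool_init()/hl_cb_pool_fini() (lines 612-641) pre-populate and drain the shared pool: init allocates cb_pool_cb_cnt fixed-size CBs onto hdev->cb_pool, unwinding through fini on failure (line 627), and fini walks the list with the _safe iterator because cb_fini() frees each node as it goes. Reconstructed from the fragments (the is_pool marking is an assumption):

for (i = 0; i < hdev->asic_prop.cb_pool_cb_cnt; i++) {
    cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
            HL_KERNEL_ASID_ID, false);
    if (!cb) {
        hl_cb_pool_fini(hdev);  /* unwind partial pool (line 627) */
        return -ENOMEM;
    }
    cb->is_pool = true;         /* assumed flag, see cb_do_release() */
    list_add(&cb->pool_list, &hdev->cb_pool);
}

/* fini: _safe variant since cb_fini() frees the node under the cursor */
list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
    list_del(&cb->pool_list);
    cb_fini(hdev, cb);
}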
649 struct hl_device *hdev = ctx->hdev; in hl_cb_va_pool_init() local
650 struct asic_fixed_properties *prop = &hdev->asic_prop; in hl_cb_va_pool_init()
653 if (!hdev->supports_cb_mapping) in hl_cb_va_pool_init()
658 dev_err(hdev->dev, in hl_cb_va_pool_init()
666 dev_err(hdev->dev, in hl_cb_va_pool_init()
681 struct hl_device *hdev = ctx->hdev; in hl_cb_va_pool_fini() local
683 if (!hdev->supports_cb_mapping) in hl_cb_va_pool_fini()
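
hl_cb_va_pool_init()/hl_cb_va_pool_fini() (lines 649-683) only do work when the ASIC supports CB mapping: they stand up a genalloc pool over the device-VA window reserved for mapped CBs, which cb_map_mem() then carves VA blocks from. A sketch using the genalloc API (the property names cb_va_start_addr/cb_va_end_addr, the cb_va_pool field, and the allocation order are assumptions):

if (!hdev->supports_cb_mapping)
    return 0;

/* one allocation granule per device MMU page */
ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
if (!ctx->cb_va_pool) {
    dev_err(hdev->dev, "Failed to create VA gen pool\n");
    return -ENOMEM;
}

rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
        prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
if (rc) {
    dev_err(hdev->dev, "Failed to add memory to VA gen pool\n");
    gen_pool_destroy(ctx->cb_va_pool);
    return rc;
}

The matching hl_cb_va_pool_fini() would then just be gen_pool_destroy(ctx->cb_va_pool) behind the same supports_cb_mapping guard.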