Lines Matching refs:cb

16 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)  in cb_map_mem()  argument
39 INIT_LIST_HEAD(&cb->va_block_list); in cb_map_mem()
41 for (bus_addr = cb->bus_address; in cb_map_mem()
42 bus_addr < cb->bus_address + cb->size; in cb_map_mem()
63 list_add_tail(&va_block->node, &cb->va_block_list); in cb_map_mem()
68 bus_addr = cb->bus_address; in cb_map_mem()
70 list_for_each_entry(va_block, &cb->va_block_list, node) { in cb_map_mem()
73 &cb->va_block_list)); in cb_map_mem()
88 cb->is_mmu_mapped = true; in cb_map_mem()
93 list_for_each_entry(va_block, &cb->va_block_list, node) { in cb_map_mem()
106 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { in cb_map_mem()
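
Read together, the cb_map_mem() matches trace a map-with-rollback pattern: the CB's bus-address range is walked in fixed-size steps, a VA block is queued on cb->va_block_list for each step, every block is then MMU-mapped in a second pass, and cb->is_mmu_mapped is set only once all mappings succeed; the later matches are the error path that unmaps what was mapped and frees the list. A minimal sketch of that shape, with hypothetical map_block()/unmap_block() helpers standing in for the driver's real MMU calls, and assuming the list node type is struct hl_vm_va_block:

/* Sketch only: map_block()/unmap_block() are hypothetical stand-ins for
 * the driver's MMU mapping calls; hl_vm_va_block is the assumed node type. */
static int map_with_rollback(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_vm_va_block *va_block, *cur, *tmp;
	int rc;

	/* Pass 1: reserve one VA block per chunk of the bus range (elided). */

	/* Pass 2: map every reserved block. */
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = map_block(ctx, va_block);
		if (rc)
			goto err_unmap;
	}

	cb->is_mmu_mapped = true;
	return 0;

err_unmap:
	/* Unmap only the blocks that were mapped before the failure. */
	list_for_each_entry(cur, &cb->va_block_list, node) {
		if (cur == va_block)
			break;
		unmap_block(ctx, cur);
	}
	list_for_each_entry_safe(cur, tmp, &cb->va_block_list, node) {
		list_del(&cur->node);
		kfree(cur);
	}
	return rc;
}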
115 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) in cb_unmap_mem() argument
122 list_for_each_entry(va_block, &cb->va_block_list, node) in cb_unmap_mem()
125 &cb->va_block_list))) in cb_unmap_mem()
134 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { in cb_unmap_mem()
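
cb_unmap_mem() is the mirror image, run when the last reference goes away: each block is unmapped with a plain list_for_each_entry() (safe while nodes are only read), and the list is then torn down with list_for_each_entry_safe(), whose lookahead cursor makes it legal to free the current node mid-walk. The teardown idiom in isolation, assuming the same node type as above:

/* The _safe variant caches the next node in tmp before the body runs,
 * so kfree(va_block) cannot invalidate the iteration. */
struct hl_vm_va_block *va_block, *tmp;

list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
	list_del(&va_block->node);
	kfree(va_block);
}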
141 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) in cb_fini() argument
143 if (cb->is_internal) in cb_fini()
145 (uintptr_t)cb->kernel_address, cb->size); in cb_fini()
147 hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size, in cb_fini()
148 cb->kernel_address, cb->bus_address); in cb_fini()
150 kfree(cb); in cb_fini()
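
cb_fini() frees the backing storage along one of two paths: an internal CB was carved out of a device-internal gen_pool, so its kernel_address is handed back to that pool, while a regular CB returns its coherent DMA buffer through the ASIC's free hook; the struct itself is kfree()d last in both cases. A sketch, assuming the pool lives in a field such as hdev->internal_cb_pool:

static void cb_fini_sketch(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,	/* field name assumed */
			      (uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}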
153 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) in cb_do_release() argument
155 if (cb->is_pool) { in cb_do_release()
157 list_add(&cb->pool_list, &hdev->cb_pool); in cb_do_release()
160 cb_fini(hdev, cb); in cb_do_release()
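
cb_do_release() is the policy layer above cb_fini(): a CB that originated in the preallocated pool is recycled back onto hdev->cb_pool instead of being freed, so a later hl_cb_create() can reuse it without another coherent-DMA allocation. A sketch, with the pool-lock name assumed:

static void cb_do_release_sketch(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);	/* lock name assumed */
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}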
167 struct hl_cb *cb; in cb_release() local
169 cb = container_of(ref, struct hl_cb, refcount); in cb_release()
170 hdev = cb->hdev; in cb_release()
172 hl_debugfs_remove_cb(cb); in cb_release()
174 if (cb->is_mmu_mapped) in cb_release()
175 cb_unmap_mem(cb->ctx, cb); in cb_release()
177 hl_ctx_put(cb->ctx); in cb_release()
179 cb_do_release(hdev, cb); in cb_release()
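
cb_release() is the kref release callback, invoked exactly once when the last kref_put() drops the count to zero. container_of() recovers the owning hl_cb from the embedded struct kref, after which teardown runs in reverse creation order: debugfs entry, MMU mapping, context reference, then the buffer itself. Reconstructed from the matches above:

static void cb_release_sketch(struct kref *ref)
{
	struct hl_cb *cb = container_of(ref, struct hl_cb, refcount);
	struct hl_device *hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);	/* balances hl_ctx_get() at create time */

	cb_do_release(hdev, cb);
}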
185 struct hl_cb *cb; in hl_cb_alloc() local
198 cb = kzalloc(sizeof(*cb), GFP_ATOMIC); in hl_cb_alloc()
200 cb = kzalloc(sizeof(*cb), GFP_KERNEL); in hl_cb_alloc()
202 if (!cb) in hl_cb_alloc()
208 kfree(cb); in hl_cb_alloc()
213 cb->is_internal = true; in hl_cb_alloc()
214 cb->bus_address = hdev->internal_cb_va_base + cb_offset; in hl_cb_alloc()
217 &cb->bus_address, GFP_ATOMIC); in hl_cb_alloc()
220 &cb->bus_address, in hl_cb_alloc()
228 kfree(cb); in hl_cb_alloc()
232 cb->kernel_address = p; in hl_cb_alloc()
233 cb->size = cb_size; in hl_cb_alloc()
235 return cb; in hl_cb_alloc()
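
hl_cb_alloc() chooses its GFP flags by caller: the two kzalloc() matches reflect an atomic path and a sleeping path, the atomic one presumably for the kernel context, which can need CBs where sleeping is forbidden. Internal CBs are carved from a pre-mapped region, so bus_address is computed as internal_cb_va_base plus a pool offset rather than coming from the DMA API; regular CBs allocate fresh coherent memory through the ASIC hook. A condensed sketch of the regular path, assuming HL_KERNEL_ASID_ID marks the kernel context and eliding the internal-pool branch:

static struct hl_cb *hl_cb_alloc_sketch(struct hl_device *hdev, u32 cb_size,
					int ctx_id)
{
	struct hl_cb *cb;
	void *p;

	/* The kernel context may request a CB where sleeping is not allowed,
	 * hence the atomic flavor; the exact condition is assumed. */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
			&cb->bus_address,
			ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL);
	if (!p) {
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}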
242 struct hl_cb *cb; in hl_cb_create() local
275 cb = list_first_entry(&hdev->cb_pool, in hl_cb_create()
276 typeof(*cb), pool_list); in hl_cb_create()
277 list_del(&cb->pool_list); in hl_cb_create()
288 cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb); in hl_cb_create()
289 if (!cb) { in hl_cb_create()
295 cb->hdev = hdev; in hl_cb_create()
296 cb->ctx = ctx; in hl_cb_create()
297 hl_ctx_get(hdev, cb->ctx); in hl_cb_create()
307 rc = cb_map_mem(ctx, cb); in hl_cb_create()
313 rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC); in hl_cb_create()
321 cb->id = (u64) rc; in hl_cb_create()
323 kref_init(&cb->refcount); in hl_cb_create()
324 spin_lock_init(&cb->lock); in hl_cb_create()
330 *handle = cb->id | HL_MMAP_TYPE_CB; in hl_cb_create()
333 hl_debugfs_add_cb(cb); in hl_cb_create()
338 if (cb->is_mmu_mapped) in hl_cb_create()
339 cb_unmap_mem(cb->ctx, cb); in hl_cb_create()
341 hl_ctx_put(cb->ctx); in hl_cb_create()
342 cb_do_release(hdev, cb); in hl_cb_create()
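
hl_cb_create() tries the preallocated pool first, popping the first entry off hdev->cb_pool before falling back to hl_cb_alloc(). The fresh CB pins its context with hl_ctx_get(), is MMU-mapped when requested, and is then published in an IDR so user space can name it by a small integer; the returned handle is that id tagged with HL_MMAP_TYPE_CB, which lets the driver's mmap entry point route offsets back to the CB path. The publish step in isolation, with the manager lock name assumed:

spin_lock(&mgr->cb_lock);	/* lock name assumed */
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);	/* end=0: no upper bound */
spin_unlock(&mgr->cb_lock);
if (rc < 0)
	goto release_cb;	/* unwind: unmap, hl_ctx_put(), cb_do_release() */

cb->id = (u64)rc;
kref_init(&cb->refcount);
spin_lock_init(&cb->lock);

/* Tag the id so mmap() can demultiplex CB handles from other object types. */
*handle = cb->id | HL_MMAP_TYPE_CB;

GFP_ATOMIC is required here because the allocation runs under the spinlock that guards the handle table.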
351 struct hl_cb *cb; in hl_cb_destroy() local
364 cb = idr_find(&mgr->cb_handles, handle); in hl_cb_destroy()
365 if (cb) { in hl_cb_destroy()
368 kref_put(&cb->refcount, cb_release); in hl_cb_destroy()
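
hl_cb_destroy() unpublishes the handle and drops the creation reference; the CB only dies once every concurrent holder (an active mmap, an in-flight user) has called hl_cb_put() as well. A sketch of the lookup-remove-put sequence, lock name assumed:

spin_lock(&mgr->cb_lock);
cb = idr_find(&mgr->cb_handles, handle);
if (cb)
	idr_remove(&mgr->cb_handles, handle);
spin_unlock(&mgr->cb_lock);

if (cb)
	kref_put(&cb->refcount, cb_release);	/* frees only if last reference */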
426 struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data; in cb_vm_close() local
429 new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start); in cb_vm_close()
432 cb->mmap_size = new_mmap_size; in cb_vm_close()
436 spin_lock(&cb->lock); in cb_vm_close()
437 cb->mmap = false; in cb_vm_close()
438 spin_unlock(&cb->lock); in cb_vm_close()
440 hl_cb_put(cb); in cb_vm_close()
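
cb_vm_close() copes with partial munmap(): the kernel may split a VMA and close only a fragment, so each close subtracts the fragment's span from cb->mmap_size and returns early while anything remains mapped. Only when the whole span is gone is cb->mmap cleared under cb->lock and the mmap-time reference dropped. Reconstructed from the matches:

static void cb_vm_close_sketch(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *)vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;		/* only part of the mapping was unmapped */
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);		/* balances hl_cb_get() in hl_cb_mmap() */
	vma->vm_private_data = NULL;
}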
451 struct hl_cb *cb; in hl_cb_mmap() local
462 cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle); in hl_cb_mmap()
463 if (!cb) { in hl_cb_mmap()
471 if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) { in hl_cb_mmap()
474 vma->vm_end - vma->vm_start, cb->size); in hl_cb_mmap()
489 spin_lock(&cb->lock); in hl_cb_mmap()
491 if (cb->mmap) { in hl_cb_mmap()
498 cb->mmap = true; in hl_cb_mmap()
500 spin_unlock(&cb->lock); in hl_cb_mmap()
509 vma->vm_private_data = cb; in hl_cb_mmap()
511 rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address, in hl_cb_mmap()
512 cb->bus_address, cb->size); in hl_cb_mmap()
514 spin_lock(&cb->lock); in hl_cb_mmap()
515 cb->mmap = false; in hl_cb_mmap()
519 cb->mmap_size = cb->size; in hl_cb_mmap()
524 spin_unlock(&cb->lock); in hl_cb_mmap()
526 hl_cb_put(cb); in hl_cb_mmap()
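
hl_cb_mmap() pins the CB by handle, checks that the VMA covers exactly the page-aligned CB size, and enforces a single live mapping per CB: the cb->mmap flag is tested and set under cb->lock so two racing mmap() calls cannot both win. If the ASIC's cb_mmap hook then fails, the flag is cleared and the reference dropped; on success cb->mmap_size records the mapped span for cb_vm_close() to count back down. The test-and-set gate in isolation:

spin_lock(&cb->lock);
if (cb->mmap) {
	spin_unlock(&cb->lock);
	rc = -EINVAL;	/* exact errno not visible in the matches */
	goto put_cb;
}
cb->mmap = true;	/* claim the single mapping slot */
spin_unlock(&cb->lock);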
533 struct hl_cb *cb; in hl_cb_get() local
536 cb = idr_find(&mgr->cb_handles, handle); in hl_cb_get()
538 if (!cb) { in hl_cb_get()
545 kref_get(&cb->refcount); in hl_cb_get()
549 return cb; in hl_cb_get()
553 void hl_cb_put(struct hl_cb *cb) in hl_cb_put() argument
555 kref_put(&cb->refcount, cb_release); in hl_cb_put()
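
hl_cb_get() and hl_cb_put() are the standard lookup-and-pin pair: the IDR lookup and kref_get() happen under the manager's lock (not visible among the matches) so a CB cannot be destroyed between being found and being pinned. Every successful get must be balanced by a put; typical use:

cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
if (!cb)
	return -EINVAL;	/* errno choice illustrative */

/* ... safely use cb->kernel_address / cb->bus_address ... */

hl_cb_put(cb);	/* may trigger cb_release() if the handle was destroyed */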
566 struct hl_cb *cb; in hl_cb_mgr_fini() local
572 idr_for_each_entry(idp, cb, id) { in hl_cb_mgr_fini()
573 if (kref_put(&cb->refcount, cb_release) != 1) in hl_cb_mgr_fini()
576 id, cb->ctx->asid); in hl_cb_mgr_fini()
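
hl_cb_mgr_fini() runs when a process's file descriptor is torn down: idr_for_each_entry() visits every handle still in the table and drops its creation reference. kref_put() returns 1 only when it actually released the object, so any other result means something else still held a reference that should have been gone, and the driver logs the id and owning ASID (message text below is illustrative):

idr_for_each_entry(idp, cb, id) {
	if (kref_put(&cb->refcount, cb_release) != 1)
		dev_err(hdev->dev,
			"CB %d for ctx %d still has extra references\n",
			id, cb->ctx->asid);
}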
586 struct hl_cb *cb; in hl_cb_kernel_create() local
598 cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); in hl_cb_kernel_create()
600 WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle); in hl_cb_kernel_create()
601 if (!cb) in hl_cb_kernel_create()
604 return cb; in hl_cb_kernel_create()
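
hl_cb_kernel_create() is a convenience wrapper for driver-internal use: it creates a CB in hdev->kernel_cb_mgr and immediately re-resolves the returned handle with hl_cb_get() so the caller gets back a pinned pointer instead of a bare id. A lookup failure here can only mean internal corruption, hence WARN() rather than a quiet error. A sketch, with the hl_cb_create() argument order assumed:

struct hl_cb *hl_cb_kernel_create_sketch(struct hl_device *hdev, u32 cb_size)
{
	u64 cb_handle;
	struct hl_cb *cb;

	if (hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size,
			 &cb_handle, HL_KERNEL_ASID_ID, false))	/* order assumed */
		return NULL;

	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32)cb_handle);
	/* A miss here means the driver corrupted its own handle table. */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32)cb_handle);
	return cb;
}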
614 struct hl_cb *cb; in hl_cb_pool_init() local
621 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, in hl_cb_pool_init()
623 if (cb) { in hl_cb_pool_init()
624 cb->is_pool = true; in hl_cb_pool_init()
625 list_add(&cb->pool_list, &hdev->cb_pool); in hl_cb_pool_init()
637 struct hl_cb *cb, *tmp; in hl_cb_pool_fini() local
639 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { in hl_cb_pool_fini()
640 list_del(&cb->pool_list); in hl_cb_pool_fini()
641 cb_fini(hdev, cb); in hl_cb_pool_fini()
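
hl_cb_pool_init() fills hdev->cb_pool up front with CBs of the ASIC-advertised size, marking each is_pool so cb_do_release() recycles rather than frees them; hl_cb_pool_fini() drains the pool with the _safe iterator, unlinking and cb_fini()ing every entry. The pool trades memory held in reserve for skipping coherent-DMA allocation on the create fast path. A sketch of the fill loop, with the pool-count property name assumed:

int i;
struct hl_cb *cb;

for (i = 0; i < hdev->asic_prop.cb_pool_cb_cnt; i++) {	/* count field assumed */
	cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
			 HL_KERNEL_ASID_ID, false);
	if (!cb)
		return -ENOMEM;
	cb->is_pool = true;
	list_add(&cb->pool_list, &hdev->cb_pool);
}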