Lines Matching refs:virt_addr

12 bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)  in hl_is_dram_va()  argument
16 return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, in hl_is_dram_va()
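
The two matches above (lines 12 and 16) show how hl_is_dram_va() classifies a device virtual address: it is a DRAM VA if a dmmu.page_size-sized area starting at virt_addr lies entirely inside the DRAM MMU's address range. A minimal user-space model of that check, with hypothetical stand-in structs and example range bounds (the driver reads these from its ASIC properties):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for the driver's property structs. */
  struct mmu_props { uint64_t page_size, start_addr, end_addr; };
  struct dev_props { struct mmu_props dmmu; };

  /* Mirrors hl_mem_area_inside_range(): [addr, addr + size) must sit
   * completely inside [range_start, range_end), with no overflow. */
  static bool mem_area_inside_range(uint64_t addr, uint64_t size,
                                    uint64_t range_start, uint64_t range_end)
  {
      uint64_t end = addr + size;

      return addr >= range_start && end <= range_end && end > addr;
  }

  static bool is_dram_va(const struct dev_props *prop, uint64_t virt_addr)
  {
      return mem_area_inside_range(virt_addr, prop->dmmu.page_size,
                                   prop->dmmu.start_addr,
                                   prop->dmmu.end_addr);
  }

  int main(void)
  {
      /* Example only: 2 MiB DRAM pages in [0x1_00000000, 0x8_00000000). */
      struct dev_props prop = {
          .dmmu = { 0x200000, 0x100000000ull, 0x800000000ull },
      };

      printf("%d\n", is_dram_va(&prop, 0x100000000ull)); /* prints 1 */
      return 0;
  }
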
145 int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, in hl_mmu_unmap_page() argument
159 is_dram_addr = hl_is_dram_va(hdev, virt_addr); in hl_mmu_unmap_page()
194 real_virt_addr = virt_addr; in hl_mmu_unmap_page()
233 int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, in hl_mmu_map_page() argument
248 is_dram_addr = hl_is_dram_va(hdev, virt_addr); in hl_mmu_map_page()
292 (hdev->asic_funcs->scramble_addr(hdev, virt_addr) & in hl_mmu_map_page()
295 (virt_addr & (real_page_size - 1))))) in hl_mmu_map_page()
298 phys_addr, virt_addr, real_page_size); in hl_mmu_map_page()
301 real_virt_addr = virt_addr; in hl_mmu_map_page()
322 real_virt_addr = virt_addr; in hl_mmu_map_page()
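
In hl_mmu_map_page() and hl_mmu_unmap_page(), the address is first classified with hl_is_dram_va() (lines 159, 248). The map path then rejects addresses that are not aligned to the hardware ("real") page size; note that line 292 runs virt_addr through the ASIC's scramble_addr() hook before the alignment test. Both functions then walk the region one real page at a time, starting from real_virt_addr = virt_addr (lines 194, 301, 322). A sketch of that split-into-real-pages loop, assuming page_size is a multiple of real_page_size (the types, the identity scrambler, and the printf standing in for the PTE write are all simplifications):

  #include <stdint.h>
  #include <stdio.h>

  /* Placeholder for the ASIC-specific scramble_addr() hook. */
  static uint64_t scramble_addr(uint64_t addr) { return addr; }

  static int map_page(uint64_t virt_addr, uint64_t phys_addr,
                      uint32_t page_size, uint32_t real_page_size)
  {
      uint64_t real_virt_addr = virt_addr;
      uint64_t real_phys_addr = phys_addr;
      uint32_t npages = page_size / real_page_size;

      /* The (scrambled) VA and the PA must both be aligned to the
       * hardware page size (cf. the checks around lines 292-295). */
      if ((scramble_addr(virt_addr) & (real_page_size - 1)) ||
          (phys_addr & (real_page_size - 1)))
          return -1;

      /* One PTE per hardware page; a large "user" page spans several. */
      for (uint32_t i = 0; i < npages; i++) {
          printf("PTE: va 0x%llx -> pa 0x%llx\n",
                 (unsigned long long)real_virt_addr,
                 (unsigned long long)real_phys_addr);
          real_virt_addr += real_page_size;
          real_phys_addr += real_page_size;
      }
      return 0;
  }

  int main(void)
  {
      /* Map a 16 KiB page as four 4 KiB hardware pages. */
      return map_page(0x1000000, 0x2000000, 0x4000, 0x1000);
  }
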
347 int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, in hl_mmu_map_contiguous() argument
357 if (hl_mem_area_inside_range(virt_addr, size, in hl_mmu_map_contiguous()
360 else if (hl_mem_area_inside_range(virt_addr, size, in hl_mmu_map_contiguous()
363 else if (hl_mem_area_inside_range(virt_addr, size, in hl_mmu_map_contiguous()
370 curr_va = virt_addr + off; in hl_mmu_map_contiguous()
387 curr_va = virt_addr + off; in hl_mmu_map_contiguous()
406 int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size) in hl_mmu_unmap_contiguous() argument
415 if (hl_mem_area_inside_range(virt_addr, size, in hl_mmu_unmap_contiguous()
418 else if (hl_mem_area_inside_range(virt_addr, size, in hl_mmu_unmap_contiguous()
421 else if (hl_mem_area_inside_range(virt_addr, size, in hl_mmu_unmap_contiguous()
428 curr_va = virt_addr + off; in hl_mmu_unmap_contiguous()
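
hl_mmu_map_contiguous() and hl_mmu_unmap_contiguous() share the same preamble (lines 357-363 and 415-421): the page size is taken from whichever virtual address range (host, host-huge, or DRAM) fully contains [virt_addr, virt_addr + size). The body then steps curr_va = virt_addr + off across the area (lines 370, 428), and the second loop in the map path (line 387) re-walks the already-mapped prefix to undo it when a later page fails. A compressed model of that select-then-iterate-with-rollback pattern (the range table values are invented, and map/unmap are no-op stubs):

  #include <stdint.h>

  struct va_range { uint64_t start, end; uint32_t page_size; };

  /* Invented example ranges; the driver gets them from ASIC properties. */
  static const struct va_range ranges[3] = {
      { 0x20000000ull,  0x40000000ull,  0x1000 },   /* host      */
      { 0x40000000ull,  0x80000000ull,  0x200000 }, /* host huge */
      { 0x100000000ull, 0x800000000ull, 0x200000 }, /* dram      */
  };

  static int map_page(uint64_t va, uint64_t pa, uint32_t sz)
  { (void)va; (void)pa; (void)sz; return 0; }
  static int unmap_page(uint64_t va, uint32_t sz)
  { (void)va; (void)sz; return 0; }

  static int map_contiguous(uint64_t virt_addr, uint64_t phys_addr,
                            uint32_t size)
  {
      uint32_t page_size = 0;
      uint64_t off;

      /* Pick the page size from the range containing the whole area. */
      for (int i = 0; i < 3; i++)
          if (virt_addr >= ranges[i].start &&
              virt_addr + size <= ranges[i].end)
              page_size = ranges[i].page_size;
      if (!page_size)
          return -1;

      for (off = 0; off < size; off += page_size) {
          uint64_t curr_va = virt_addr + off;

          if (map_page(curr_va, phys_addr + off, page_size))
              goto unmap;
      }
      return 0;

  unmap:
      /* Roll back the pages mapped so far (cf. line 387). */
      for (uint64_t undo = 0; undo < off; undo += page_size)
          unmap_page(virt_addr + undo, page_size);
      return -1;
  }

  int main(void)
  {
      /* 16 KiB inside the example host range: four 4 KiB mappings. */
      return map_contiguous(0x20000000ull, 0x0ull, 0x4000);
  }
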
479 static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, in hl_mmu_pa_page_with_offset() argument
520 (virt_addr & page_offset_mask); in hl_mmu_pa_page_with_offset()
534 (virt_addr & offset_mask); in hl_mmu_pa_page_with_offset()
538 int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) in hl_mmu_va_to_pa() argument
545 rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops); in hl_mmu_va_to_pa()
549 hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr); in hl_mmu_va_to_pa()
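
hl_mmu_va_to_pa() (lines 538-549) is a thin composition: it asks hl_mmu_get_tlb_info() for the translation hops, then lets hl_mmu_pa_page_with_offset() splice the page-offset bits of virt_addr (lines 520, 534) onto the physical page taken from the last hop's PTE. That splicing step reduces to two masks, assuming a power-of-two page size:

  #include <stdint.h>
  #include <stdio.h>

  /* Combine the physical page base from the final hop's PTE with the
   * low (in-page offset) bits of the VA. page_size must be 2^n. */
  static uint64_t pa_page_with_offset(uint64_t pte_phys_page,
                                      uint64_t virt_addr,
                                      uint64_t page_size)
  {
      uint64_t offset_mask = page_size - 1;  /* 0xfff for 4 KiB pages */
      uint64_t page_mask = ~offset_mask;

      return (pte_phys_page & page_mask) | (virt_addr & offset_mask);
  }

  int main(void)
  {
      printf("0x%llx\n", (unsigned long long)
             pa_page_with_offset(0xabcd0000ull, 0x12345678ull, 0x1000));
      return 0; /* prints 0xabcd0678 */
  }
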
554 int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, in hl_mmu_get_tlb_info() argument
566 hops->scrambled_vaddr = virt_addr; /* assume no scrambling */ in hl_mmu_get_tlb_info()
568 is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, in hl_mmu_get_tlb_info()
579 virt_addr, hops); in hl_mmu_get_tlb_info()
582 virt_addr, hops); in hl_mmu_get_tlb_info()
588 hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, in hl_mmu_get_tlb_info()
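
hl_mmu_get_tlb_info() first records hops->scrambled_vaddr = virt_addr (line 566, "assume no scrambling"), classifies the address against the DRAM range (line 568), dispatches to the matching page-table walker (lines 579, 582), and then reuses hl_mmu_pa_page_with_offset() to fill in the unscrambled physical address (line 588). A skeletal model of that dispatch; the hop-info struct, the walker callbacks, and the 4 KiB page assumption are all illustrative, not the driver's real types:

  #include <stdbool.h>
  #include <stdint.h>

  struct tlb_hop_info {
      uint64_t scrambled_vaddr;
      uint64_t last_hop_pte;       /* physical page from the final hop */
      uint64_t unscrambled_paddr;
  };

  typedef int (*walker_fn)(uint64_t virt_addr, struct tlb_hop_info *hops);

  static int get_tlb_info(uint64_t virt_addr, struct tlb_hop_info *hops,
                          bool is_dram_addr,
                          walker_fn dram_walk, walker_fn host_walk)
  {
      int rc;

      hops->scrambled_vaddr = virt_addr;   /* assume no scrambling */

      /* DRAM VAs walk the device MMU tables, others the host MMU's. */
      rc = is_dram_addr ? dram_walk(virt_addr, hops)
                        : host_walk(virt_addr, hops);
      if (rc)
          return rc;

      /* Report the PA the same way hl_mmu_va_to_pa() would
       * (4 KiB page assumed here for brevity). */
      hops->unscrambled_paddr = (hops->last_hop_pte & ~0xfffull) |
                                (virt_addr & 0xfffull);
      return 0;
  }

  static int fake_walk(uint64_t virt_addr, struct tlb_hop_info *hops)
  {
      (void)virt_addr;
      hops->last_hop_pte = 0xabcd0000ull;  /* pretend final-hop PTE */
      return 0;
  }

  int main(void)
  {
      struct tlb_hop_info hops = { 0 };

      get_tlb_info(0x1234ull, &hops, false, fake_walk, fake_walk);
      return hops.unscrambled_paddr != 0xabcd0234ull; /* 0 on success */
  }
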