Lines matching refs:dev in kernel/dma/swiotlb.c

Each hit below is prefixed with its line number in the file and suffixed with the enclosing function; lines where dev is declared as a function parameter are additionally tagged "argument".
543 static unsigned int swiotlb_align_offset(struct device *dev, u64 addr) in swiotlb_align_offset() argument
545 return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); in swiotlb_align_offset()
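Line 545 is the entire body of swiotlb_align_offset(): it keeps only the bits of the address that lie inside one IO TLB slot and inside the device's minimum alignment granule. A minimal userspace sketch of the same computation; the min_align_mask parameter stands in for the dma_get_min_align_mask(dev) lookup, and IO_TLB_SHIFT = 11 (2 KiB slots) matches the kernel default:

#include <stdint.h>
#include <stdio.h>

#define IO_TLB_SHIFT 11                     /* 2 KiB slots, kernel default */
#define IO_TLB_SIZE  (1u << IO_TLB_SHIFT)

/* Stand-in for swiotlb_align_offset(); min_align_mask replaces the
 * dma_get_min_align_mask(dev) lookup. */
static unsigned int align_offset(uint64_t addr, unsigned int min_align_mask)
{
        return addr & min_align_mask & (IO_TLB_SIZE - 1);
}

int main(void)
{
        /* A device with a 4 KiB min-align mask (0xfff): only the low
         * 11 bits survive the IO_TLB_SIZE clamp. */
        printf("0x%x\n", align_offset(0x12345678, 0xfff)); /* prints 0x678 */
        return 0;
}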
551 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, in swiotlb_bounce() argument
554 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_bounce()
566 orig_addr_offset = swiotlb_align_offset(dev, orig_addr); in swiotlb_bounce()
568 dev_WARN_ONCE(dev, 1, in swiotlb_bounce()
576 dev_WARN_ONCE(dev, 1, in swiotlb_bounce()
586 dev_WARN_ONCE(dev, 1, in swiotlb_bounce()
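swiotlb_bounce() copies data between the original buffer and the bounce slot; the dev_WARN_ONCE() calls at lines 568, 576 and 586 fire when the offset recorded at map time disagrees with the tlb address, or when the copy would overrun the mapped size. A sketch of just the directional copy, assuming plain pointers instead of phys_addr_t and ignoring the kernel's page-by-page path for unmapped highmem buffers:

#include <stdio.h>
#include <string.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Conceptual core of swiotlb_bounce(): a memcpy whose direction follows
 * the transfer. The kernel version additionally validates offsets. */
static void bounce(void *orig, void *tlb, size_t size,
                   enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE)
                memcpy(tlb, orig, size);  /* CPU data -> bounce slot */
        else
                memcpy(orig, tlb, size);  /* device writes -> CPU buffer */
}

int main(void)
{
        char orig[8] = "payload", tlb[8] = {0};

        bounce(orig, tlb, sizeof(orig), DMA_TO_DEVICE);
        printf("%s\n", tlb);              /* prints "payload" */
        return 0;
}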
647 static int swiotlb_do_find_slots(struct device *dev, int area_index, in swiotlb_do_find_slots() argument
651 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_do_find_slots()
653 unsigned long boundary_mask = dma_get_seg_boundary(dev); in swiotlb_do_find_slots()
655 phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; in swiotlb_do_find_slots()
658 dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); in swiotlb_do_find_slots()
661 unsigned int offset = swiotlb_align_offset(dev, orig_addr); in swiotlb_do_find_slots()
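swiotlb_do_find_slots() derives its search constraints from the device before scanning: the segment boundary mask (line 653), the DMA address of the pool start clipped to that boundary (line 655), and an alignment mask reduced to whole slots (line 658). A sketch of two derived quantities; the stride formula is an assumption based on kernels of this vintage and omits the extra page-alignment step for large allocations:

#include <stdint.h>
#include <stdio.h>

#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE  (1u << IO_TLB_SHIFT)

/* Slots that can satisfy the device's min-align mask recur every
 * `stride` slots, so the search advances in that step. */
static unsigned int slot_stride(unsigned int min_align_mask)
{
        unsigned int iotlb_align_mask = min_align_mask & ~(IO_TLB_SIZE - 1);

        return (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
}

/* dma_get_seg_boundary(dev) returns a mask such as 0xffffffff; an
 * allocation is usable only if its first and last byte agree above
 * the mask, i.e. sit in the same segment. */
static int same_segment(uint64_t a, uint64_t b, uint64_t boundary_mask)
{
        return (a | boundary_mask) == (b | boundary_mask);
}

int main(void)
{
        printf("%u\n", slot_stride(0xfff));     /* 2: every other slot */
        printf("%d\n", same_segment(0xfffff800, 0x100000000ull,
                                    0xffffffff));       /* 0: crosses 4 GiB */
        return 0;
}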
737 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, in swiotlb_find_slots() argument
740 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots()
745 index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size, in swiotlb_find_slots()
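swiotlb_find_slots() (line 737) is a thin wrapper: the pool is split into areas, each with its own lock, and the wrapper walks them round-robin, starting from an area derived from the calling CPU, until swiotlb_do_find_slots() succeeds. The shape of that loop, with try_area standing in for the per-area search (an assumption, not the kernel signature):

#include <stdio.h>

/* Round-robin over per-CPU areas, as swiotlb_find_slots() does; a
 * CPU-derived starting area spreads contention across the area locks. */
static int find_slots(int nareas, int start_area, int (*try_area)(int))
{
        int i = start_area;

        do {
                int index = try_area(i);

                if (index >= 0)
                        return index;
                if (++i >= nareas)
                        i = 0;
        } while (i != start_area);

        return -1;      /* every area is full */
}

static int demo_try(int area)
{
        return area == 3 ? 42 : -1;     /* pretend only area 3 has room */
}

int main(void)
{
        printf("%d\n", find_slots(4, 1, demo_try));     /* prints 42 */
        return 0;
}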
766 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, in swiotlb_tbl_map_single() argument
771 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single()
772 unsigned int offset = swiotlb_align_offset(dev, orig_addr); in swiotlb_tbl_map_single()
778 dev_warn_ratelimited(dev, in swiotlb_tbl_map_single()
787 dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)", in swiotlb_tbl_map_single()
792 index = swiotlb_find_slots(dev, orig_addr, in swiotlb_tbl_map_single()
796 dev_warn_ratelimited(dev, in swiotlb_tbl_map_single()
817 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); in swiotlb_tbl_map_single()
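Lines 766-817 give the skeleton of swiotlb_tbl_map_single(): bail out early if there is no pool or the sizes are inconsistent, reserve slots, record the original address per slot (swiotlb_bounce() needs it for later syncs), and copy the buffer in unconditionally, even for DMA_FROM_DEVICE, so stale slot contents cannot leak. A self-contained miniature of that flow over a toy slot array; names and sizes are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NSLOTS    8
#define SLOT_SIZE 2048
#define MAPPING_ERROR (-1)

static uint8_t   pool[NSLOTS * SLOT_SIZE];      /* the bounce memory */
static uintptr_t slot_orig[NSLOTS];             /* orig addr per slot */
static int       slot_used[NSLOTS];

/* Toy first-fit allocator standing in for swiotlb_find_slots(). */
static int find_slots(void)
{
        for (int i = 0; i < NSLOTS; i++) {
                if (!slot_used[i]) {
                        slot_used[i] = 1;
                        return i;
                }
        }
        return -1;
}

/* Miniature swiotlb_tbl_map_single(): returns a slot index, or error. */
static int map_single(void *orig, size_t mapping_size, size_t alloc_size)
{
        int index;

        if (mapping_size > alloc_size)  /* "Invalid sizes", line 787 */
                return MAPPING_ERROR;
        index = find_slots();
        if (index < 0)                  /* "buffer is full", line 796 */
                return MAPPING_ERROR;
        slot_orig[index] = (uintptr_t)orig;     /* needed by later syncs */
        /* Unconditional copy-in, mirroring line 817. */
        memcpy(&pool[index * SLOT_SIZE], orig, mapping_size);
        return index;
}

int main(void)
{
        char buf[] = "dma payload";
        int idx = map_single(buf, sizeof(buf), SLOT_SIZE);

        printf("slot %d: %s\n", idx, (char *)&pool[idx * SLOT_SIZE]);
        return 0;
}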
821 static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr) in swiotlb_release_slots() argument
823 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_release_slots()
825 unsigned int offset = swiotlb_align_offset(dev, tlb_addr); in swiotlb_release_slots()
871 void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr, in swiotlb_tbl_unmap_single() argument
880 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE); in swiotlb_tbl_unmap_single()
882 swiotlb_release_slots(dev, tlb_addr); in swiotlb_tbl_unmap_single()
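swiotlb_tbl_unmap_single() (lines 871-882) is the mirror image: copy the device's writes back to the original buffer first, unless the caller passed DMA_ATTR_SKIP_CPU_SYNC, and only then hand the slots back via swiotlb_release_slots(). A sketch with stub helpers (assumptions, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

/* Trivial stand-ins so the sketch builds on its own. */
static void bounce(enum dma_data_direction dir) { printf("bounce %d\n", dir); }
static void release_slots(void)                 { printf("release\n"); }

/* Shape of swiotlb_tbl_unmap_single(): sync back before freeing. */
static void unmap_single(enum dma_data_direction dir, bool skip_cpu_sync)
{
        if (!skip_cpu_sync &&
            (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                bounce(DMA_FROM_DEVICE);        /* line 880 */
        release_slots();                        /* line 882 */
}

int main(void)
{
        unmap_single(DMA_FROM_DEVICE, false);
        return 0;
}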
885 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, in swiotlb_sync_single_for_device() argument
889 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); in swiotlb_sync_single_for_device()
894 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, in swiotlb_sync_single_for_cpu() argument
898 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE); in swiotlb_sync_single_for_cpu()
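The two sync helpers (lines 885-898) reduce to directionally filtered bounces: for_device pushes fresh CPU writes into the slot before the device touches it again, for_cpu pulls the device's writes out before the CPU reads. A condensed sketch with bounce() stubbed:

#include <stdio.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

static void bounce(enum dma_data_direction dir) { printf("bounce %d\n", dir); }

/* swiotlb_sync_single_for_device(): refresh the slot with CPU writes. */
static void sync_for_device(enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                bounce(DMA_TO_DEVICE);          /* line 889 */
}

/* swiotlb_sync_single_for_cpu(): publish device writes to the CPU. */
static void sync_for_cpu(enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                bounce(DMA_FROM_DEVICE);        /* line 898 */
}

int main(void)
{
        sync_for_device(DMA_BIDIRECTIONAL);
        sync_for_cpu(DMA_BIDIRECTIONAL);
        return 0;
}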
907 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, in swiotlb_map() argument
913 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size); in swiotlb_map()
915 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir, in swiotlb_map()
921 dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr); in swiotlb_map()
922 if (unlikely(!dma_capable(dev, dma_addr, size, true))) { in swiotlb_map()
923 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir, in swiotlb_map()
925 dev_WARN_ONCE(dev, 1, in swiotlb_map()
927 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in swiotlb_map()
931 if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) in swiotlb_map()
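swiotlb_map() (lines 907-931) strings the pieces together for a plain streaming mapping: trace the bounce, grab a bounce slot, convert it to a DMA address, and verify the device can actually reach that address; if dma_capable() still fails (line 922), the slot is unmapped again and the mapping fails with a one-time warning. Non-coherent devices additionally get a cache writeback at line 931. A flow sketch with each kernel primitive replaced by a stub (assumptions, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_MAPPING_ERROR ((uint64_t)-1)

/* Stub helpers standing in for the kernel primitives. */
static uint64_t tbl_map_single(uint64_t paddr)  { return paddr + 0x1000; }
static void     tbl_unmap_single(uint64_t a)    { (void)a; }
static uint64_t phys_to_dma(uint64_t a)         { return a; }
static bool     dma_capable(uint64_t dma_addr)  { return dma_addr < (1ull << 32); }
static void     arch_sync_for_device(void)      { }

static uint64_t map(uint64_t paddr, bool coherent)
{
        uint64_t bounce = tbl_map_single(paddr);        /* line 915 */
        uint64_t dma_addr;

        if (bounce == DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        dma_addr = phys_to_dma(bounce);                 /* line 921 */
        if (!dma_capable(dma_addr)) {                   /* line 922 */
                tbl_unmap_single(bounce);               /* line 923 */
                return DMA_MAPPING_ERROR;               /* after the warning, line 925 */
        }
        if (!coherent)
                arch_sync_for_device();                 /* line 931 */
        return dma_addr;
}

int main(void)
{
        printf("0x%llx\n", (unsigned long long)map(0x1000, true));
        return 0;
}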
936 size_t swiotlb_max_mapping_size(struct device *dev) in swiotlb_max_mapping_size() argument
938 int min_align_mask = dma_get_min_align_mask(dev); in swiotlb_max_mapping_size()
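swiotlb_max_mapping_size() caps a single mapping at one segment of IO_TLB_SEGSIZE contiguous slots and, in this version of the code, subtracts the room the allocator may burn skipping slots to honour a non-zero min-align mask (line 938). A runnable version of that arithmetic with the kernel's default constants:

#include <stddef.h>
#include <stdio.h>

#define IO_TLB_SHIFT   11
#define IO_TLB_SIZE    ((size_t)1 << IO_TLB_SHIFT)  /* 2 KiB slot */
#define IO_TLB_SEGSIZE 128                          /* slots per segment */

/* Mirrors swiotlb_max_mapping_size(): 256 KiB minus the alignment
 * reserve, with roundup() open-coded. */
static size_t max_mapping_size(size_t min_align_mask)
{
        size_t min_align = 0;

        if (min_align_mask)
                min_align = (min_align_mask + IO_TLB_SIZE - 1) &
                            ~(IO_TLB_SIZE - 1);

        return IO_TLB_SIZE * IO_TLB_SEGSIZE - min_align;
}

int main(void)
{
        printf("%zu\n", max_mapping_size(0));      /* 262144 (256 KiB) */
        printf("%zu\n", max_mapping_size(0xfff));  /* 258048 */
        return 0;
}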
952 bool is_swiotlb_active(struct device *dev) in is_swiotlb_active() argument
954 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active()
993 struct page *swiotlb_alloc(struct device *dev, size_t size) in swiotlb_alloc() argument
995 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc()
1002 index = swiotlb_find_slots(dev, 0, size, 0); in swiotlb_alloc()
1011 bool swiotlb_free(struct device *dev, struct page *page, size_t size) in swiotlb_free() argument
1015 if (!is_swiotlb_buffer(dev, tlb_addr)) in swiotlb_free()
1018 swiotlb_release_slots(dev, tlb_addr); in swiotlb_free()
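swiotlb_alloc() and swiotlb_free() (lines 993-1018) reuse the slot allocator for whole allocations out of a restricted pool: alloc passes orig_addr = 0, since there is no original buffer to bounce (line 1002), and free refuses addresses that is_swiotlb_buffer() does not recognise (line 1015) before releasing the slots. A shape sketch over stub helpers (assumptions, not the kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IO_TLB_SHIFT 11

/* Stubs modelling the pool state. */
static uint64_t pool_start = 0x80000000ull;
static int  find_slots(uint64_t orig, size_t sz) { (void)orig; (void)sz; return 0; }
static bool is_pool_addr(uint64_t addr)          { return addr >= pool_start; }
static void release_slots(uint64_t addr)         { (void)addr; }

static uint64_t pool_alloc(size_t size)
{
        int index = find_slots(0, size);        /* orig_addr == 0, line 1002 */

        return index < 0 ? 0 : pool_start + ((uint64_t)index << IO_TLB_SHIFT);
}

static bool pool_free(uint64_t addr)
{
        if (!is_pool_addr(addr))        /* is_swiotlb_buffer() check, line 1015 */
                return false;
        release_slots(addr);            /* line 1018 */
        return true;
}

int main(void)
{
        uint64_t a = pool_alloc(4096);

        printf("%d\n", pool_free(a));   /* prints 1 */
        return 0;
}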
1024 struct device *dev) in rmem_swiotlb_device_init() argument
1033 dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping."); in rmem_swiotlb_device_init()
1072 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
1078 struct device *dev) in rmem_swiotlb_device_release() argument
1080 dev->dma_io_tlb_mem = &io_tlb_default_mem; in rmem_swiotlb_device_release()
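The reserved-memory hooks at the end of the listing show how restricted DMA pools are wired per device: rmem_swiotlb_device_init() builds an io_tlb_mem over the reserved region, refusing regions outside the linear mapping (line 1033), and points dev->dma_io_tlb_mem at it (line 1072); rmem_swiotlb_device_release() simply restores the global default pool (line 1080). The pointer swap itself, modelled on a toy device struct (the names here are illustrative):

#include <stdio.h>

struct io_tlb_mem { const char *name; };

static struct io_tlb_mem io_tlb_default_mem = { "default" };

struct toy_device {
        struct io_tlb_mem *dma_io_tlb_mem;
};

/* rmem_swiotlb_device_init(): attach the per-device restricted pool. */
static void device_init(struct toy_device *dev, struct io_tlb_mem *pool)
{
        dev->dma_io_tlb_mem = pool;                     /* line 1072 */
}

/* rmem_swiotlb_device_release(): fall back to the global pool. */
static void device_release(struct toy_device *dev)
{
        dev->dma_io_tlb_mem = &io_tlb_default_mem;      /* line 1080 */
}

int main(void)
{
        struct io_tlb_mem restricted = { "restricted" };
        struct toy_device dev;

        device_init(&dev, &restricted);
        device_release(&dev);
        printf("%s\n", dev.dma_io_tlb_mem->name);       /* prints "default" */
        return 0;
}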