/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>

struct device;
struct page;
struct scatterlist;

enum swiotlb_force {
	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
	SWIOTLB_FORCE,		/* swiotlb=force */
	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
};

/*
 * Maximum allowable number of contiguous slabs to map; must be a power of 2.
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE 128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

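/*
 * Each slab is thus 1 << IO_TLB_SHIFT = 2048 bytes, so a full segment of
 * IO_TLB_SEGSIZE slabs spans 128 * 2 KiB = 256 KiB, which bounds the largest
 * contiguous bounce-buffer mapping.
 */

/*
 * Setup helpers (implemented in kernel/dma/swiotlb.c): swiotlb_init()
 * allocates the default bounce buffer early at boot, while the *_with_tbl()
 * variants let an architecture hand in its own buffer,
 * swiotlb_late_init_with_tbl() doing so after the early allocators are gone.
 * swiotlb_nr_tbl() and swiotlb_size_or_default() report the configured number
 * of slabs and overall size; swiotlb_update_mem_attributes() re-applies memory
 * attributes to the buffer, e.g. marking it decrypted when memory encryption
 * is active.
 */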
extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
extern void __init swiotlb_update_mem_attributes(void);

/*
 * Enumeration for sync targets: SYNC_FOR_CPU copies data from the bounce
 * buffer back to the original buffer, SYNC_FOR_DEVICE copies it from the
 * original buffer into the bounce buffer (as the DMA direction permits).
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

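/*
 * Core bounce-buffering primitives, implemented in kernel/dma/swiotlb.c:
 * swiotlb_tbl_map_single() grabs at least @alloc_size bytes worth of IO TLB
 * slots for the buffer at @phys and returns the slot's physical address,
 * bouncing @mapping_size bytes in as the direction requires;
 * swiotlb_tbl_unmap_single() bounces data back as needed and releases the
 * slots; swiotlb_tbl_sync_single() copies between the original buffer and the
 * bounce buffer for the given sync target.
 */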
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
					  phys_addr_t phys,
					  size_t mapping_size,
					  size_t alloc_size,
					  enum dma_data_direction dir,
					  unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     size_t alloc_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

extern void swiotlb_tbl_sync_single(struct device *hwdev,
				    phys_addr_t tlb_addr,
				    size_t size, enum dma_data_direction dir,
				    enum dma_sync_target target);
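
/*
 * Illustrative sketch only (not part of this API): roughly how a DMA mapping
 * path might bounce a streaming to-device buffer through the IO TLB.  'dev',
 * 'tbl_dma_addr', 'paddr' and 'size' are placeholders, and error handling is
 * omitted:
 *
 *	phys_addr_t tlb_addr;
 *
 *	tlb_addr = swiotlb_tbl_map_single(dev, tbl_dma_addr, paddr,
 *					  size, size, DMA_TO_DEVICE, 0);
 *	...	CPU updates the buffer at paddr, then re-bounces it	...
 *	swiotlb_tbl_sync_single(dev, tlb_addr, size, DMA_TO_DEVICE,
 *				SYNC_FOR_DEVICE);
 *	...	device DMAs from phys_to_dma(dev, tlb_addr)		...
 *	swiotlb_tbl_unmap_single(dev, tlb_addr, size, size,
 *				 DMA_TO_DEVICE, 0);
 */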

#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;
extern phys_addr_t io_tlb_start, io_tlb_end;

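/* True if @paddr lies inside the [io_tlb_start, io_tlb_end) bounce pool. */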
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

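/*
 * swiotlb_map() bounces a single streaming mapping: on success it rewrites
 * *phys and *dma_addr to point at the bounce buffer and returns true; it
 * returns false when bouncing fails (e.g. swiotlb=noforce, no free slots, or
 * the bounced address is still not DMA-addressable for the device).
 */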
bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void __init swiotlb_exit(void);
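/*
 * Runtime queries: swiotlb_max_segment() returns the advisory maximum segment
 * size for scatter-gather users, swiotlb_max_mapping_size() the largest
 * contiguous size swiotlb can bounce, and is_swiotlb_active() whether a
 * bounce buffer has actually been allocated.
 */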
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
#else
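/* CONFIG_SWIOTLB=n stubs so that callers compile without bounce buffering. */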
#define swiotlb_force SWIOTLB_NO_FORCE
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	return false;
}
static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
		dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline unsigned int swiotlb_max_segment(void)
{
	return 0;
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}

static inline bool is_swiotlb_active(void)
{
	return false;
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);
extern void swiotlb_set_max_segment(unsigned int);

#endif /* __LINUX_SWIOTLB_H */