/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation. Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern u64 zone_dma_limit;

/*
 * Record the mapping of CPU physical to DMA addresses for a given region.
 */
struct bus_dma_region {
	phys_addr_t	cpu_start;
	dma_addr_t	dma_start;
	u64		size;
};
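
/*
 * The translation helpers below walk dev->dma_range_map until they reach an
 * entry whose size is zero, so a range map must end with an all-zero sentinel
 * entry.  A minimal sketch, with made-up addresses for illustration only: a
 * bus that sees the first gigabyte of RAM at bus address 0 while the CPU sees
 * it at 0x80000000 could be described by a hypothetical map like
 *
 *	static const struct bus_dma_region example_map[] = {
 *		{ .cpu_start = 0x80000000, .dma_start = 0, .size = SZ_1G },
 *		{ }	(zero-size sentinel ends the walk)
 *	};
 */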

static inline bool zone_dma32_is_empty(int node)
{
#ifdef CONFIG_ZONE_DMA32
	pg_data_t *pgdat = NODE_DATA(node);

	return zone_is_empty(&pgdat->node_zones[ZONE_DMA32]);
#else
	return true;
#endif
}

static inline bool zone_dma32_are_empty(void)
{
#ifdef CONFIG_NUMA
	int node;

	for_each_node(node)
		if (!zone_dma32_is_empty(node))
			return false;
#else
	if (!zone_dma32_is_empty(numa_node_id()))
		return false;
#endif

	return true;
}

static inline dma_addr_t translate_phys_to_dma(struct device *dev,
		phys_addr_t paddr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++) {
		u64 offset = paddr - m->cpu_start;

		if (paddr >= m->cpu_start && offset < m->size)
			return m->dma_start + offset;
	}

	/* make sure dma_capable fails when no translation is available */
	return DMA_MAPPING_ERROR;
}
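
/*
 * Worked example against the hypothetical example_map sketched above: for
 * paddr == 0x80001000 the first entry matches, offset == 0x1000, and the
 * helper returns dma_start + offset == 0x1000.  An address covered by no
 * entry falls off the end of the map and yields DMA_MAPPING_ERROR.
 */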

static inline phys_addr_t translate_dma_to_phys(struct device *dev,
		dma_addr_t dma_addr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++) {
		u64 offset = dma_addr - m->dma_start;

		if (dma_addr >= m->dma_start && offset < m->size)
			return m->cpu_start + offset;
	}

	/* no region covers dma_addr, report an invalid physical address */
	return (phys_addr_t)-1;
}

static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
{
	dma_addr_t ret = (dma_addr_t)U64_MAX;

	for (; map->size; map++)
		ret = min(ret, map->dma_start);
	return ret;
}

static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
{
	dma_addr_t ret = 0;

	for (; map->size; map++)
		ret = max(ret, map->dma_start + map->size - 1);
	return ret;
}
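
/*
 * For the single-entry example_map sketched above, dma_range_map_min() would
 * return 0 and dma_range_map_max() SZ_1G - 1.  With several regions they
 * report the lowest and highest bus addresses covered by any entry, i.e. the
 * overall bounds of the address range the device can reach through the map.
 */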

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted phys_to_dma
#endif
#else
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
		phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.
 * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
 * buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	phys_addr_t paddr;

	if (dev->dma_range_map)
		paddr = translate_dma_to_phys(dev, dma_addr);
	else
		paddr = dma_addr;

	return __sme_clr(paddr);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
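
/*
 * On this generic (!CONFIG_ARCH_HAS_PHYS_TO_DMA) path the two directions are
 * symmetric: for a device with no dma_range_map,
 * dma_to_phys(dev, phys_to_dma(dev, paddr)) == paddr, since __sme_clr()
 * removes exactly the encryption bit that __sme_set() added.
 */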

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;
	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
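
/*
 * Example, with illustrative numbers: a device limited to a 32-bit dma_mask
 * and no bus_dma_limit passes for addr == 0xfffff000, size == 0x1000
 * (end == 0xffffffff) but fails once end crosses 4 GiB.  On configurations
 * with a 32-bit dma_addr_t, the min(addr, end) test also catches RAM
 * mappings whose addr + size - 1 wrapped around zero.
 */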

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#endif /* _LINUX_DMA_DIRECT_H */