// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

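/*
 * Warn once about a mapping request the device cannot address, either
 * because no dma_mask is set or because the address overflows the DMA
 * and bus masks.
 */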
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev->dma_mask) {
		dev_err_once(dev, "DMA map on device without dma_mask\n");
	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
		dev_err_once(dev,
			"overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
	}
	WARN_ON_ONCE(1);
}

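/*
 * Translate a CPU physical address to a device address, leaving out the
 * memory encryption bit for devices that require unencrypted DMA.
 */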
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

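/*
 * Return the smallest DMA mask that covers the highest directly mapped
 * physical page in the system.
 */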
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
EXPORT_SYMBOL_GPL(dma_direct_get_required_mask);

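/*
 * Pick the GFP zone flag (GFP_DMA, GFP_DMA32 or none) that best matches
 * the given DMA mask, clamped to the bus mask, and report the
 * corresponding physical address limit through *phys_mask.
 */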
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted(dev))
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

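/*
 * Check that a physically contiguous buffer lies entirely below the
 * device's coherent DMA and bus masks.
 */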
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

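/*
 * Allocate pages for a coherent mapping, first from CMA and then from the
 * page allocator, retrying in progressively more restrictive GFP zones
 * until the result is addressable by the device.
 */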
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_mask;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

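/*
 * Allocate a coherent buffer and return its kernel address (or an opaque
 * struct page cookie for DMA_ATTR_NO_KERNEL_MAPPING), setting *dma_handle
 * to the device-visible address.
 */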
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		/* return the page pointer as the opaque cookie */
		return page;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		__dma_direct_free_pages(dev, size, page);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = uncached_kernel_address(ret);
	}

	return ret;
}

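/* Release pages obtained from __dma_direct_alloc_pages(). */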
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
	dma_free_contiguous(dev, page, size);
}

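/*
 * Undo dma_direct_alloc_pages(): re-encrypt the memory if needed, map the
 * uncached alias back to the cached kernel address, and free the pages.
 */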
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		__dma_direct_free_pages(dev, size, cpu_addr);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		cpu_addr = cached_kernel_address(cpu_addr);
	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}

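/*
 * Entry point for coherent allocations: defer to the architecture
 * allocator when an uncached mapping is needed but no uncached segment
 * is available, otherwise allocate directly.
 */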
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}
EXPORT_SYMBOL_GPL(dma_direct_alloc);

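/* Free a buffer allocated with dma_direct_alloc(). */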
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
EXPORT_SYMBOL_GPL(dma_direct_free);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
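/*
 * Make CPU writes visible to the device: sync any swiotlb bounce buffer
 * and perform the architecture cache maintenance for non-coherent devices.
 */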
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

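/* Scatterlist variant of dma_direct_sync_single_for_device(). */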
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
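/*
 * Make device writes visible to the CPU: perform the architecture cache
 * maintenance for non-coherent devices and sync back from any swiotlb
 * bounce buffer.
 */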
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

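/* Scatterlist variant of dma_direct_sync_single_for_cpu(). */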
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);

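/*
 * Tear down a streaming mapping: sync the buffer for the CPU (unless
 * DMA_ATTR_SKIP_CPU_SYNC is set) and release any swiotlb bounce buffer.
 */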
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

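/* Unmap each entry of a scatterlist mapped with dma_direct_map_sg(). */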
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif

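/*
 * A direct mapping is possible when the device can reach the address and
 * swiotlb bouncing is not being forced.
 */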
static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		dma_capable(dev, dma_addr, size);
}

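/*
 * Map a page for streaming DMA, bouncing through swiotlb if the device
 * cannot address it directly, and doing cache maintenance for
 * non-coherent devices.
 */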
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);

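/*
 * Map each scatterlist entry individually; on failure unmap everything
 * mapped so far and return 0.
 */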
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);

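/*
 * Map an MMIO resource for DMA.  The physical address is used as the bus
 * address directly; it only needs to be reachable by the device.
 */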
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size))) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	return mask >= __phys_to_dma(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}