• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * DMA mapping support for platforms lacking IOMMUs.
 *
 * Copyright (C) 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
10 #include <linux/dma-mapping.h>
11 #include <linux/io.h>
12 
nommu_map_page(struct device * dev,struct page * page,unsigned long offset,size_t size,enum dma_data_direction dir,unsigned long attrs)13 static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
14 				 unsigned long offset, size_t size,
15 				 enum dma_data_direction dir,
16 				 unsigned long attrs)
17 {
18 	dma_addr_t addr = page_to_phys(page) + offset;
19 
20 	WARN_ON(size == 0);
21 
22 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
23 		dma_cache_sync(dev, page_address(page) + offset, size, dir);
24 
25 	return addr;
26 }
27 
nommu_map_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir,unsigned long attrs)28 static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
29 			int nents, enum dma_data_direction dir,
30 			unsigned long attrs)
31 {
32 	struct scatterlist *s;
33 	int i;
34 
35 	WARN_ON(nents == 0 || sg[0].length == 0);
36 
37 	for_each_sg(sg, s, nents, i) {
38 		BUG_ON(!sg_page(s));
39 
40 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
41 			dma_cache_sync(dev, sg_virt(s), s->length, dir);
42 
43 		s->dma_address = sg_phys(s);
44 		s->dma_length = s->length;
45 	}
46 
47 	return nents;
48 }
49 
#ifdef CONFIG_DMA_NONCOHERENT
/* Synchronize the CPU cache for a single mapped region. */
static void nommu_sync_single(struct device *dev, dma_addr_t addr,
			      size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(addr);

	dma_cache_sync(dev, vaddr, size, dir);
}

/* Synchronize the CPU cache for every entry of a mapped scatterlist. */
static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
			  int nelems, enum dma_data_direction dir)
{
	struct scatterlist *cur;
	int idx;

	for_each_sg(sg, cur, nelems, idx)
		dma_cache_sync(dev, sg_virt(cur), cur->length, dir);
}
#endif
67 
/*
 * DMA operations for IOMMU-less platforms: bus addresses are
 * identity-mapped physical addresses, with explicit cache maintenance
 * only on non-coherent configurations.
 */
const struct dma_map_ops nommu_dma_ops = {
	.alloc			= dma_generic_alloc_coherent,
	.free			= dma_generic_free_coherent,
	.map_page		= nommu_map_page,
	.map_sg			= nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
	/* Only the for_device hooks are provided; for_cpu is left NULL. */
	.sync_single_for_device	= nommu_sync_single,
	.sync_sg_for_device	= nommu_sync_sg,
#endif
	.is_phys		= 1,
};
79 
no_iommu_init(void)80 void __init no_iommu_init(void)
81 {
82 	if (dma_ops)
83 		return;
84 	dma_ops = &nommu_dma_ops;
85 }
86