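/*
 * DMA mapping primitives for the Meta (metag) architecture.
 *
 * DMA on this platform is not cache-coherent: streaming mappings use the
 * buffer's physical address as the bus address and bracket each transfer
 * with explicit cache maintenance via dma_sync_for_device() and
 * dma_sync_for_cpu().
 */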
#ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/scatterlist.h>
#include <asm/bug.h>

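/*
 * "Noncoherent" allocations are simply backed by the coherent allocator;
 * either way the returned memory is non-cacheable (see dma_cache_sync()
 * below).
 */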
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size);

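/*
 * Streaming mappings: the bus address of a buffer is simply its physical
 * address. Mapping performs the cache maintenance the device needs before
 * it touches the buffer; unmapping re-synchronizes the CPU's view.
 */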
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	dma_sync_for_device(ptr, size, direction);
	return virt_to_phys(ptr);
}

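/*
 * A minimal usage sketch (hypothetical driver code; 'dev', 'buf' and 'len'
 * are placeholders, not part of this API):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... hand 'bus' to the device and wait for the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */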
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
}

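/*
 * Scatter-gather mapping: every entry's dma_address is its physical
 * address and each entry is synchronized individually. Entries are never
 * merged, so the entry count is returned unchanged.
 */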
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
		dma_sync_for_device(sg_virt(sg), sg->length, direction);
	}

	return nents;
}

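/*
 * Note: dma_map_page() below hands a physical address straight to
 * dma_sync_for_device(). That is only correct if the kernel's linear
 * mapping is an identity mapping (virtual == physical), which this code
 * appears to assume for lowmem pages.
 */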
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
			    direction);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

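/*
 * Unmapping a scatterlist re-synchronizes each entry for the CPU; the
 * dma_address assignment merely mirrors dma_map_sg() and is redundant on
 * the unmap path.
 */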
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
	     enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nhwentries == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nhwentries, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
	}
}

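/*
 * Partial-ownership helpers: hand a mapped buffer (or a sub-range of one)
 * back to the CPU or to the device without unmapping it.
 */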
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	dma_sync_for_cpu(phys_to_virt(dma_handle) + offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	dma_sync_for_device(phys_to_virt(dma_handle) + offset, size,
			    direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
		    enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
		       int nelems, enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

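/*
 * A mapping can never fail here: the bus address is just the physical
 * address, so dma_mapping_error() always reports success.
 */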
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

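/* Any DMA mask is accepted; dma_set_mask() just records it on the device. */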
#define dma_supported(dev, mask)        (1)

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need
 * to do any flushing here.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
}

/* drivers/base/dma-mapping.c */
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);

#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)

#endif /* _ASM_METAG_DMA_MAPPING_H */