#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/scatterlist.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * See Documentation/DMA-API.txt for the description of how the
 * following DMA API should work.
 */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
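
/*
 * Minimal usage sketch for the coherent API (hypothetical driver code,
 * not part of this header; `dev' is assumed to be a valid struct device):
 *
 *	dma_addr_t handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... hand `handle' to the device, access the buffer via `cpu_addr' ...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
 */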

extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction direction);

static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
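
/*
 * Streaming-mapping sketch (hypothetical driver code): map a kmalloc'd
 * buffer for a single DMA transfer and unmap it once the transfer has
 * completed.  `buf' and `len' are assumed to come from the caller:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... start the transfer using `dma', wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */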

extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);

static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
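
/*
 * Scatter/gather sketch (hypothetical driver code): dma_map_sg() may
 * coalesce entries, so program the device from its return value rather
 * than the original `nents'; dma_unmap_sg() still takes `nents':
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	for_each_sg(sglist, s, count, i)
 *		... feed sg_dma_address(s) / sg_dma_len(s) to the device ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */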

extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
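
/*
 * Page-mapping sketch (hypothetical driver code), useful when the buffer
 * is held as struct page pointers rather than a kernel virtual address:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 *	... run the transfer ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
 */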

static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			     enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
				enum dma_data_direction direction)
{
	flush_write_buffers();
}
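
/*
 * Ownership hand-off sketch (hypothetical driver code): to look at a
 * still-mapped streaming buffer from the CPU, sync it back to the CPU
 * first, then hand it to the device again before restarting DMA:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU may now read the buffer contents ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */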

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
			    enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
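
/*
 * Even though this implementation always reports success, portable
 * callers should still check every mapping (sketch, hypothetical
 * driver code):
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */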

static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA can provide.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
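
/*
 * Probe-time sketch (hypothetical driver code): try a 32-bit mask and
 * fall back to 24 bits, which is the narrowest mask dma_supported()
 * accepts here, failing the probe only when both are rejected:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 */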

static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	flush_write_buffers();
}
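
/*
 * Sketch for the noncoherent API (hypothetical driver code): under the
 * generic DMA API contract, memory from dma_alloc_noncoherent() must be
 * synced with dma_cache_sync() before the device looks at it:
 *
 *	buf = dma_alloc_noncoherent(dev, len, &handle, GFP_KERNEL);
 *	... CPU fills in `buf' ...
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *	... now start the device on `handle' ...
 */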

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma, void *cpu_addr,
				    dma_addr_t dma_addr, size_t size)
{
	return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size)
{
	return -EINVAL;
}

#endif /* _ASM_DMA_MAPPING_H */