#ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H

#include <asm/cache.h>

struct scatterlist;

/*
 * DMA is unconditionally reported as supported: this platform performs
 * no per-device addressing-capability check against @mask.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
/*
 * Accept any DMA mask and report success (0).
 * NOTE(review): @mask is not stored into the device here — presumably
 * the mask is irrelevant on this platform; confirm against callers.
 */
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return 0;
}

extern void *dma_alloc_coherent(struct device *, size_t,
				dma_addr_t *, gfp_t);
extern void dma_free_coherent(struct device *, size_t,
			      void *, dma_addr_t);

/*
 * Allocate coherent DMA memory, delegating to dma_alloc_coherent().
 * The @attrs argument is accepted for API compatibility only.
 */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	/* attrs is not supported and ignored */
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
/*
 * Free memory obtained from dma_alloc_attrs(), delegating to
 * dma_free_coherent(). The @attrs argument is accepted for API
 * compatibility only.
 */
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	/* attrs is not supported and ignored */
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
/*
 * Non-coherent allocation is simply backed by the coherent allocator
 * on this platform.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, handle, flag);
}
/* Counterpart to dma_alloc_noncoherent(): frees via the coherent path. */
static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *addr, dma_addr_t handle)
{
	dma_free_coherent(dev, size, addr, handle);
}
/*
 * No-op: allocations here are coherent, so no explicit cache
 * maintenance is required before/after device access.
 */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	/* we use coherent allocation, so not much to do here. */
}

extern dma_addr_t dma_map_single(struct device *, void *, size_t,
				 enum dma_data_direction);
/* No-op unmap: dma_map_single() on this platform needs no teardown. */
static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
}

extern dma_addr_t dma_map_page(struct device *, struct page *,
			       unsigned long, size_t size,
			       enum dma_data_direction);
/* No-op unmap: dma_map_page() on this platform needs no teardown. */
static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
				  size_t size, enum dma_data_direction dir)
{
}

extern int dma_map_sg(struct device *, struct scatterlist *, int,
		      enum dma_data_direction);
/* No-op unmap: dma_map_sg() on this platform needs no teardown. */
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries, enum dma_data_direction dir)
{
}

extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
				       enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
				   enum dma_data_direction);

/*
 * Range sync for the device: implemented coarsely by syncing the whole
 * region from the start of the mapping up to offset + size, rather
 * than just the [offset, offset + size) window.
 */
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
/* No-op: no CPU-side cache maintenance is needed on this platform. */
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
					   size_t size, enum dma_data_direction dir)
{
}
/* No-op: scatterlist CPU sync requires no work on this platform. */
static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				       int nents, enum dma_data_direction dir)
{
}
/*
 * Range sync for the CPU: like the _for_device variant, syncs the
 * whole region up to offset + size instead of only the requested
 * window (which, given the no-op _for_cpu sync above, costs nothing).
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}
/* Mappings never fail on this platform, so always report success (0). */
static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
{
	return 0;
}

/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);

#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)

#endif  /* _M68K_DMA_MAPPING_H */