/* DMA mapping routines for the MN10300 arch
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * See Documentation/DMA-API.txt for the description of how the
 * following DMA API should work.
 */

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, int flag);

extern void dma_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))

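/*
 * Sketch of coherent usage (hypothetical driver code, not part of this
 * file): allocate a descriptor ring at probe time, free it on removal.
 *
 *	dma_addr_t handle;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, handle);
 */
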
static inline
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
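	/* mn10300_dcache_flush_inv() takes no address range: the whole
	 * data cache is written back and invalidated before the device
	 * is handed the buffer, not just the buffer's own lines.
	 */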
	mn10300_dcache_flush_inv();
	return virt_to_bus(ptr);
}

static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

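/*
 * Sketch of streaming usage (hypothetical driver code): map a buffer,
 * let the device transfer, then unmap.  "start_dma" stands in for the
 * device-specific kick-off.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	start_dma(dev, addr, len);
 *	...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */
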
static inline
int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

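	/* Each segment's bus address is simply its physical address:
	 * there is no IOMMU to program on this arch.
	 */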
	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
	}

	mn10300_dcache_flush_inv();
	return nents;
}

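/*
 * Sketch of scatter-gather usage (hypothetical driver code;
 * "write_segment" stands in for programming one segment into the device):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, sg, count, i)
 *		write_segment(dev, sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
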
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static inline
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_bus(page) + offset;
}

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
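	/* Write back any dirty cache lines before the device looks at
	 * the buffer; as elsewhere in this file, the entire dcache is
	 * flushed rather than just the affected range.
	 */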
	mn10300_dcache_flush_inv();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			 int nelems, enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
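	/* Mapping never fails in this implementation: addresses come
	 * straight from virt_to_bus()/page_to_bus() with no IOMMU or
	 * bounce buffering, so there is no error state to report.
	 */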
	return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

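/*
 * Sketch (hypothetical driver code): masks narrower than the 24-bit
 * floor checked by dma_supported() above are refused with -EIO.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA available\n");
 */
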
static inline
void dma_cache_sync(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma, void *cpu_addr,
				    dma_addr_t dma_addr, size_t size)
{
	return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size)
{
	return -EINVAL;
}

#endif