/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

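/*
 * Explicit cache maintenance for a driver-supplied virtual range.
 * The direction selects the operation: write back dirty lines before
 * the device reads (DMA_TO_DEVICE), invalidate before the CPU reads
 * what the device wrote (DMA_FROM_DEVICE), or both (DMA_BIDIRECTIONAL).
 */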
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_FROM_DEVICE:
		__invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_TO_DEVICE:
		__flush_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;
	}
}
EXPORT_SYMBOL(dma_cache_sync);

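/*
 * Apply a cache maintenance callback to the memory behind a DMA handle.
 * Lowmem pages are reached through their permanent kseg mapping
 * (bus_to_virt); highmem pages have no such mapping, so they are
 * processed page by page under a temporary kmap_atomic mapping,
 * taking care of a non-page-aligned start offset.
 */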
static void do_cache_op(dma_addr_t dma_handle, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = dma_handle & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(dma_handle);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)bus_to_virt(dma_handle), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

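/*
 * Hand a streaming buffer back to the CPU: invalidate any stale cache
 * lines so that subsequent loads observe the data the device wrote.
 * Nothing needs to be done for DMA_TO_DEVICE.
 */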
static void xtensa_sync_single_for_cpu(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(dma_handle, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

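/*
 * Hand a streaming buffer to the device: write dirty cache lines back
 * to memory so the device sees them. This is only needed when the
 * configuration has a writeback data cache; with a writethrough cache
 * (XCHAL_DCACHE_IS_WRITEBACK == 0) memory is already up to date.
 */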
static void xtensa_sync_single_for_device(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(dma_handle, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

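/*
 * The scatter-gather variants simply apply the single-buffer sync to
 * every entry in the list.
 */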
static void xtensa_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
					   sg_dma_len(s), dir);
	}
}

static void xtensa_sync_sg_for_device(struct device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_device(dev, sg_dma_address(s),
					      sg_dma_len(s), dir);
	}
}

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 * Otherwise we have to use page attributes (not implemented).
 */

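/*
 * Allocate coherent memory: grab cached lowmem pages, invalidate their
 * cache lines once, and hand the caller the uncached (bypass) kseg
 * alias of the same physical memory, so that all further CPU accesses
 * go straight to memory.
 */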
static void *xtensa_dma_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle, gfp_t flag,
			      struct dma_attrs *attrs)
{
	unsigned long ret;
	unsigned long uncached = 0;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;
	ret = (unsigned long)__get_free_pages(flag, get_order(size));

	if (ret == 0)
		return NULL;

	/* We currently don't support coherent memory outside KSEG */

	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	*handle = virt_to_bus((void *)ret);
	__invalidate_dcache_range(ret, size);

	return (void *)uncached;
}

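/*
 * Undo xtensa_dma_alloc(): translate the uncached alias back to the
 * cached kseg address the pages were allocated at and free them.
 */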
static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long)vaddr +
		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;

	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	free_pages(addr, get_order(size));
}

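/*
 * Streaming mappings: the DMA (bus) address of a page is simply its
 * physical address here, so mapping and unmapping reduce to the
 * corresponding cache maintenance.
 */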
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = page_to_phys(page) + offset;

	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
	return dma_handle;
}

static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}

static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
						 s->length, dir, attrs);
	}
	return nents;
}

static void xtensa_unmap_sg(struct device *dev,
			    struct scatterlist *sg, int nents,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_unmap_page(dev, sg_dma_address(s),
				  sg_dma_len(s), dir, attrs);
	}
}

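/* Mappings never fail on this platform, so there is no error cookie. */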
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

struct dma_map_ops xtensa_dma_map_ops = {
	.alloc = xtensa_dma_alloc,
	.free = xtensa_dma_free,
	.map_page = xtensa_map_page,
	.unmap_page = xtensa_unmap_page,
	.map_sg = xtensa_map_sg,
	.unmap_sg = xtensa_unmap_sg,
	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
	.sync_single_for_device = xtensa_sync_single_for_device,
	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
	.sync_sg_for_device = xtensa_sync_sg_for_device,
	.mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);

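/*
 * Preallocate tracking entries for the dma-debug facility early in
 * boot (fs_initcall runs before device initcalls register drivers).
 */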
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init xtensa_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(xtensa_dma_init);