/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 *  This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License v2.0 as published by
 *  the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#ifndef CONFIG_X86
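/*
 * On x86 an equivalent helper is already provided by the architecture
 * (hence the #ifndef above). For other architectures, derive the coherent
 * allocation mask from the device, falling back to 24 bits for GFP_DMA
 * allocations and 32 bits otherwise.
 */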
static unsigned long dma_alloc_coherent_mask(struct device *dev,
					     gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}
#endif

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
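/*
 * Illustrative example of the problem described above: on a configuration
 * where phys_addr_t is 32 bits and dma_addr_t is 64 bits, a frame number
 * of 0x150000 shifted left by XEN_PAGE_SHIFT (12) in 32-bit arithmetic
 * wraps to 0x50000000, whereas casting to dma_addr_t before shifting
 * yields the correct bus address 0x150000000.
 */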
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

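/*
 * Returns 1 if the machine (bus) frames backing the Xen pages that cover
 * [offset, offset + length) starting at xen_pfn are contiguous, 0 otherwise.
 */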
static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}

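/*
 * Returns non-zero if the bus address falls inside the Xen-SWIOTLB bounce
 * buffer owned by this domain.
 */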
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

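/*
 * Exchange the pseudo-physical pages backing the bounce buffer for
 * machine-contiguous pages addressable within dma_bits, working in
 * IO_TLB_SEGSIZE-slab chunks and widening the allowed address width
 * (up to max_dma_bits) until the hypervisor can satisfy the request.
 */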
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
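
/*
 * Pick the number of IO TLB slabs: honour a previously configured slab
 * count (nr_tbl), otherwise default to a 64MB buffer aligned to
 * IO_TLB_SEGSIZE. Returns the resulting buffer size in bytes.
 */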
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
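
/*
 * Allocate the bounce buffer (from bootmem at early boot, from the page
 * allocator later), swap its backing pages for machine-contiguous low
 * memory via xen_swiotlb_fixup(), and hand the buffer to the core swiotlb
 * code. On failure the requested size is halved and the whole sequence
 * retried a few times before giving up.
 */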
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
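
/*
 * Allocate coherent memory for a device. The returned *dma_handle is a
 * machine (bus) address; if the pages Xen handed out are not suitable
 * (above the device's coherent DMA mask, or not machine-contiguous), they
 * are exchanged via xen_create_contiguous_region() before being returned.
 */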
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   struct dma_attrs *attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of the
	 * pseudo-physical memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to the actually allocated size. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, struct dma_attrs *attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return the
	 * corresponding physical address */
	phys = xen_bus_to_phys(dev_addr);

	/* Convert the size to the actually allocated size. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);


/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !swiotlb_force) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
					 dev_addr,
					 map & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
					 dev_addr,
					 paddr & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as xen_swiotlb_sync_single_* but for a scatter-gather list, same
 * rules and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);

int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);

/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     struct dma_attrs *attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (__generic_dma_ops(dev)->mmap)
		return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						    dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);

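/*
 * For reference: these callbacks are not wired up in this file. An
 * architecture typically collects them into a struct dma_map_ops (as x86
 * does in arch/x86/xen/pci-swiotlb-xen.c). A minimal sketch, assuming the
 * dma_map_ops field names of this kernel generation; treat it as
 * illustrative rather than authoritative:
 *
 *	static struct dma_map_ops xen_swiotlb_dma_ops = {
 *		.alloc			= xen_swiotlb_alloc_coherent,
 *		.free			= xen_swiotlb_free_coherent,
 *		.sync_single_for_cpu	= xen_swiotlb_sync_single_for_cpu,
 *		.sync_single_for_device	= xen_swiotlb_sync_single_for_device,
 *		.sync_sg_for_cpu	= xen_swiotlb_sync_sg_for_cpu,
 *		.sync_sg_for_device	= xen_swiotlb_sync_sg_for_device,
 *		.map_sg			= xen_swiotlb_map_sg_attrs,
 *		.unmap_sg		= xen_swiotlb_unmap_sg_attrs,
 *		.map_page		= xen_swiotlb_map_page,
 *		.unmap_page		= xen_swiotlb_unmap_page,
 *		.mapping_error		= xen_swiotlb_dma_mapping_error,
 *		.dma_supported		= xen_swiotlb_dma_supported,
 *		.set_dma_mask		= xen_swiotlb_set_dma_mask,
 *		.mmap			= xen_swiotlb_dma_mmap,
 *	};
 */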