/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006, 07  Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
 * Author: Fuxin Zhang, zhangfx@lemote.com
 *
 */
#ifndef __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
#define __ASM_MACH_LOONGSON64_DMA_COHERENCE_H

#include <linux/types.h>
#include <linux/dma-direction.h>

#ifdef CONFIG_SWIOTLB
#include <linux/swiotlb.h>
#endif

struct device;

/* Address translation helpers provided by the platform DMA code. */
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
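
/*
 * Convert a kernel virtual address into a bus address the device can
 * use.  Loongson 3 goes through the phys_to_dma() translation declared
 * above; older Loongson parts see system RAM at a fixed 0x80000000
 * offset on the device side of the bus, so the physical address is
 * simply ORed into that window.
 */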
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
					  size_t size)
{
#ifdef CONFIG_CPU_LOONGSON3
	return phys_to_dma(dev, virt_to_phys(addr));
#else
	return virt_to_phys(addr) | 0x80000000;
#endif
}

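/* Same translation as plat_map_dma_mem(), starting from a struct page. */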
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
					       struct page *page)
{
#ifdef CONFIG_CPU_LOONGSON3
	return phys_to_dma(dev, page_to_phys(page));
#else
	return page_to_phys(page) | 0x80000000;
#endif
}

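/*
 * Reverse translation: turn a bus address handed to a device back into
 * a CPU physical address.  On 64-bit Loongson 2F, addresses above
 * 0x8fffffff pass through unchanged and anything below is masked down
 * to its low 28 bits; the default case simply clears bit 31.
 */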
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
	dma_addr_t dma_addr)
{
#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
	return dma_to_phys(dev, dma_addr);
#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
	return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
#else
	return dma_addr & 0x7fffffff;
#endif
}

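/* Nothing to undo when a streaming mapping is torn down. */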
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction)
{
}

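/* Reject DMA masks narrower than the 24-bit ISA DMA zone. */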
static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

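/*
 * Coherence is a compile-time property here: all devices are coherent
 * unless the kernel was built with CONFIG_DMA_NONCOHERENT.
 */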
static inline int plat_device_is_coherent(struct device *dev)
{
#ifdef CONFIG_DMA_NONCOHERENT
	return 0;
#else
	return 1;
#endif /* CONFIG_DMA_NONCOHERENT */
}

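/* No extra cache maintenance is needed once a transfer has completed. */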
static inline void plat_post_dma_flush(struct device *dev)
{
}

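/*
 * Illustrative sketch only: these plat_*() hooks are consumed by the
 * generic MIPS DMA code rather than called directly by drivers.
 * Assuming the dma-default implementation, mapping a page for
 * streaming DMA looks roughly like:
 *
 *	static dma_addr_t mips_dma_map_page(struct device *dev,
 *			struct page *page, unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		if (!plat_device_is_coherent(dev))
 *			__dma_sync(page, offset, size, dir);
 *
 *		return plat_map_dma_mem_page(dev, page) + offset;
 *	}
 */
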
#endif /* __ASM_MACH_LOONGSON64_DMA_COHERENCE_H */