/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
 *
 */
#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
#define __ASM_MACH_IP32_DMA_COHERENCE_H

#include <asm/ip32/crime.h>

struct device;

/*
 * A few notes:
 * 1. The CPU sees memory as two chunks: 0-256M @ 0x0, and the rest at
 *    0x40000000 + 256M.
 * 2. PCI sees memory as one big chunk @ 0x0 (or we could use 0x40000000
 *    for native-endian).
 * 3. All other devices see memory as one big chunk at 0x40000000.
 * 4. Non-PCI devices will pass NULL as struct device *.
 *
 * Thus we translate differently, depending on the device.
 */
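
/*
 * Worked example of the translation below (a sketch, assuming
 * CRIME_HI_MEM_BASE is 0x40000000 as note 3 above suggests):
 *
 *   physical 0x00001000 (low chunk):
 *     PCI device       -> dma_addr 0x00001000
 *     non-PCI (NULL)   -> dma_addr 0x40001000
 *
 *   physical 0x50001000 (high chunk, masked to 0x10001000):
 *     PCI device       -> dma_addr 0x10001000
 *     non-PCI (NULL)   -> dma_addr 0x50001000
 */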

#define RAM_OFFSET_MASK	0x3fffffffUL

static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
	size_t size)
{
	dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;

	if (dev == NULL)
		pa += CRIME_HI_MEM_BASE;

	return pa;
}

static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
	struct page *page)
{
	dma_addr_t pa;

	pa = page_to_phys(page) & RAM_OFFSET_MASK;

	if (dev == NULL)
		pa += CRIME_HI_MEM_BASE;

	return pa;
}

/* This is almost certainly wrong but it's what dma-ip32.c used to use */
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = dma_addr & RAM_OFFSET_MASK;

	if (dma_addr >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;

	return addr;
}
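
/*
 * Illustration of why the reverse mapping above is suspect (a sketch,
 * again assuming CRIME_HI_MEM_BASE is 0x40000000): a PCI dma_addr of
 * 0x10001000 correctly comes back as physical 0x50001000, but a non-PCI
 * dma_addr of 0x40001000 (which plat_map_dma_mem produced from physical
 * 0x00001000) comes back as 0x40001000 rather than 0x00001000, because
 * the >= 256M test cannot tell the two device views apart.
 */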

static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction)
{
}

static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}
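
/*
 * Example (a sketch): DMA_BIT_MASK(24) is 0x00ffffff, i.e. a 16 MB
 * limit, which the check above treats as the tightest mask that a
 * GFP_DMA allocation can honour. A device advertising, say, a 1 MB
 * mask (0x000fffff) is rejected, while a full 32-bit mask is accepted.
 */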

static inline void plat_post_dma_flush(struct device *dev)
{
}

static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;		/* IP32 is non-coherent */
}

#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */