/* pci-dma-nommu.c: Dynamic DMA mapping support for the FRV
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Woodhouse (dwmw2@infradead.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <asm/io.h>

#if 1
#define DMA_SRAM_START	dma_coherent_mem_start
#define DMA_SRAM_END	dma_coherent_mem_end
#else // Use video RAM on Matrox
#define DMA_SRAM_START	0xe8900000
#define DMA_SRAM_END	0xe8a00000
#endif

struct dma_alloc_record {
	struct list_head	list;
	unsigned long		ofs;
	unsigned long		len;
};

static DEFINE_SPINLOCK(dma_alloc_lock);
static LIST_HEAD(dma_alloc_list);

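/*
 * Carve a coherent buffer out of the reserved DMA SRAM area by
 * first-fit: walk the offset-ordered allocation list for a gap large
 * enough to hold the request and record the new allocation there.
 * The returned CPU pointer and the DMA handle are the same address,
 * since the SRAM is used in place rather than remapped.
 */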
void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_alloc_record *new;
	struct list_head *this = &dma_alloc_list;
	unsigned long flags;
	unsigned long start = DMA_SRAM_START;
	unsigned long end;

	if (!DMA_SRAM_START) {
		printk("%s called without any DMA area reserved!\n", __func__);
		return NULL;
	}

	new = kmalloc(sizeof (*new), GFP_ATOMIC);
	if (!new)
		return NULL;

	/* Round up to a reasonable alignment */
	new->len = (size + 31) & ~31;

	spin_lock_irqsave(&dma_alloc_lock, flags);

	list_for_each (this, &dma_alloc_list) {
		struct dma_alloc_record *this_r = list_entry(this, struct dma_alloc_record, list);
		end = this_r->ofs;

		if (end - start >= size)
			goto gotone;

		start = this_r->ofs + this_r->len;
	}
	/* Reached end of list. */
	end = DMA_SRAM_END;
	this = &dma_alloc_list;

	if (end - start >= size) {
	gotone:
		new->ofs = start;
		list_add_tail(&new->list, this);
		spin_unlock_irqrestore(&dma_alloc_lock, flags);

		*dma_handle = start;
		return (void *)start;
	}

	kfree(new);
	spin_unlock_irqrestore(&dma_alloc_lock, flags);
	return NULL;
}

EXPORT_SYMBOL(dma_alloc_coherent);

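/*
 * Return a coherent buffer to the pool: find the allocation record
 * whose offset matches the DMA handle, unlink it and free it.
 * Freeing a handle that was never allocated is a fatal error.
 */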
void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	struct dma_alloc_record *rec;
	unsigned long flags;

	spin_lock_irqsave(&dma_alloc_lock, flags);

	list_for_each_entry(rec, &dma_alloc_list, list) {
		if (rec->ofs == dma_handle) {
			list_del(&rec->list);
			kfree(rec);
			spin_unlock_irqrestore(&dma_alloc_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&dma_alloc_lock, flags);
	BUG();
}

EXPORT_SYMBOL(dma_free_coherent);

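/*
 * Streaming mapping of a single buffer.  With no IOMMU this is just a
 * write-back/invalidate of the CPU cache over the buffer followed by a
 * virtual-to-bus address conversion.
 */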
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);

	return virt_to_bus(ptr);
}

EXPORT_SYMBOL(dma_map_single);

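/*
 * Streaming mapping of a scatter-gather list: write back and
 * invalidate the cache over each element, then hand back the entry
 * count unchanged.
 */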
int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		frv_cache_wback_inv(sg_dma_address(sg),
				    sg_dma_address(sg) + sg_dma_len(sg));
	}

	BUG_ON(direction == DMA_NONE);

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

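/*
 * Streaming mapping of a single page: flush the page from the data
 * cache and return its physical address plus the requested offset.
 */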
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	flush_dcache_page(page);
	return (dma_addr_t) page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

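/*
 * Illustrative usage sketch, not part of the original file: how a
 * hypothetical FRV driver might call the routines above.  The function
 * name example_dma_usage, the buffer sizes and the DMA direction are
 * assumptions made for illustration only; the sketch relies on the
 * headers already included at the top of this file.
 */
#if 0
static int example_dma_usage(struct device *dev)
{
	dma_addr_t coherent_handle, stream_handle;
	void *coherent_buf;
	void *stream_buf;

	/* Carve a 256-byte descriptor area out of the coherent SRAM. */
	coherent_buf = dma_alloc_coherent(dev, 256, &coherent_handle, GFP_KERNEL);
	if (!coherent_buf)
		return -ENOMEM;

	/* Stream an ordinary kernel buffer to the device: the map call
	 * writes the cache back and returns a bus address. */
	stream_buf = kmalloc(1024, GFP_KERNEL);
	if (!stream_buf) {
		dma_free_coherent(dev, 256, coherent_buf, coherent_handle);
		return -ENOMEM;
	}
	stream_handle = dma_map_single(dev, stream_buf, 1024, DMA_TO_DEVICE);

	/* ... hand coherent_handle and stream_handle to the device here;
	 * a real driver would pair the map with dma_unmap_single() when
	 * the transfer completes ... */

	kfree(stream_buf);
	dma_free_coherent(dev, 256, coherent_buf, coherent_handle);
	return 0;
}
#endif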