/* dma-alloc.c: consistent DMA memory allocation
 *
 * Derived from arch/ppc/mm/cachemap.c
 *
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 * Modified for 36-bit support.  -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>

static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
{
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pge = pgd_offset_k(va);
	pue = pud_offset(pge, va);
	pme = pmd_offset(pue, va);

	/* Use middle 10 bits of VA to index the second-level map */
	pte = pte_alloc_kernel(pme, va);
	if (pte) {
		err = 0;
		set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
	}

	return err;
}

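/* Note on the walk above: the comments in map_page() describe only two
 * translation levels (10-bit first level, 10-bit second level, 12-bit page
 * offset), so the pud/pmd steps do not index tables of their own; they are
 * there to fit the kernel's generic multi-level page-table API and simply
 * descend through the pgd entry.
 */
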
/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space.  This is done so we
 * get unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping.  This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	struct vm_struct *area;
	unsigned long page, va, pa;
	void *ret;
	int order, err, i;

	if (in_interrupt())
		BUG();

	/* only allocate page size areas */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/* allocate some common virtual space to map the new pages */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(page, order);
		return NULL;
	}
	va = VMALLOC_VMADDR(area->addr);
	ret = (void *) va;

	/* this gives us the real physical address of the first page */
	*dma_handle = pa = virt_to_bus((void *) page);

	/* set refcount=1 on all pages in an order>0 allocation so that vfree()
	 * will actually free all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);
		split_page(rpage, order);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

	if (err) {
		vfree((void *) va);
		return NULL;
	}

	/* we need to ensure that there are no cachelines in use, or, worse,
	 * dirty in this area - can't do until after virtual address mappings
	 * are created
	 */
	frv_cache_invalidate(va, va + size);

	return ret;
}
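
/* Usage sketch (hypothetical caller, not part of this file): a driver could
 * allocate an uncached descriptor ring and program its device with the
 * returned bus address, e.g.:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = consistent_alloc(GFP_KERNEL, 2 * PAGE_SIZE, &ring_dma);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, regs + MYDEV_RING_BASE);
 *
 * ('regs' and 'MYDEV_RING_BASE' are made-up names.)  The CPU then accesses
 * the ring through 'ring', which was mapped uncached above, while the
 * device uses 'ring_dma'; the buffer is later released with
 * consistent_free(ring).
 */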

/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(void *vaddr)
{
	if (in_interrupt())
		BUG();
	vfree(vaddr);
}

/*
 * make an area of memory consistent for the given DMA direction.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long) vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		frv_cache_invalidate(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		frv_dcache_writeback(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		frv_cache_wback_inv(start, end);
		break;
	}
}
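
/* Usage sketch (hypothetical caller, not part of this file): for streaming
 * DMA on a cacheable buffer, write back dirty lines before the device reads
 * the buffer, and invalidate before the CPU reads what the device wrote:
 *
 *	consistent_sync(buf, len, PCI_DMA_TODEVICE);	 (CPU -> device)
 *	... run the DMA transfer, wait for completion ...
 *	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);	 (device -> CPU)
 */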

/*
 * consistent_sync_page() makes an area of a page consistent; it is
 * identical to consistent_sync(), but takes a struct page instead of a
 * virtual address
 */

void consistent_sync_page(struct page *page, unsigned long offset,
			  size_t size, int direction)
{
	void *start;

	start = page_address(page) + offset;
	consistent_sync(start, size, direction);
}
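
/* Usage sketch (hypothetical caller, not part of this file): this form is
 * convenient when the caller holds a struct page rather than a kernel
 * virtual address, e.g. one element of a scatter-gather list:
 *
 *	consistent_sync_page(page, offset, len, PCI_DMA_FROMDEVICE);
 */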