/* dma-alloc.c: consistent DMA memory allocation
 *
 * Derived from arch/ppc/mm/cachemap.c
 *
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function returns
 * the virtual address, and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 * Modified for 36-bit support.  -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/hardirq.h>

#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>

static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
{
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pge = pgd_offset_k(va);
	pue = pud_offset(pge, va);
	pme = pmd_offset(pue, va);

	/* Use middle 10 bits of VA to index the second-level map */
	pte = pte_alloc_kernel(pme, va);
	if (pte != 0) {
		err = 0;
		set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
	}

	return err;
}
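
/*
 * Illustrative sketch (not part of the original file): map_page() is the
 * per-page primitive used by consistent_alloc() below; establishing one
 * uncached kernel mapping for a hypothetical page-aligned pair (va, pa)
 * would look like:
 *
 *	if (map_page(va, pa, PAGE_KERNEL_NOCACHE) < 0)
 *		return -ENOMEM;
 */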

/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space.  This is done so we
 * get unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping.  This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	struct vm_struct *area;
	unsigned long page, va, pa;
	void *ret;
	int order, err, i;

	if (in_interrupt())
		BUG();

	/* only allocate page size areas */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/* allocate some common virtual space to map the new pages */
	area = get_vm_area(size, VM_ALLOC);
	if (area == 0) {
		free_pages(page, order);
		return NULL;
	}
	va = VMALLOC_VMADDR(area->addr);
	ret = (void *) va;

	/* this gives us the real physical address of the first page */
	*dma_handle = pa = virt_to_bus((void *) page);

	/* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free
	 * all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);
		split_page(rpage, order);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

	if (err) {
		vfree((void *) va);
		return NULL;
	}

	/* we need to ensure that there are no cachelines in use, or worse dirty in this area
	 * - can't do until after virtual address mappings are created
	 */
	frv_cache_invalidate(va, va + size);

	return ret;
}
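
/*
 * Illustrative sketch (not part of the original file): a driver could use
 * consistent_alloc() above, paired with consistent_free() below, roughly
 * like this; 'ring' and 'ring_dma' are hypothetical names:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &ring_dma);
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	... program the device with 'ring_dma'; the CPU uses 'ring' ...
 *
 *	consistent_free(ring);
 */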

/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(void *vaddr)
{
	if (in_interrupt())
		BUG();
	vfree(vaddr);
}

/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long) vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		frv_cache_invalidate(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		frv_dcache_writeback(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		frv_dcache_writeback(start, end);
		break;
	}
}
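
/*
 * Illustrative sketch (not part of the original file): for a streaming DMA
 * transfer on an ordinary cacheable buffer, a driver calls consistent_sync()
 * with the direction that matches the transfer; 'buf' and 'len' are
 * hypothetical:
 *
 *	consistent_sync(buf, len, PCI_DMA_TODEVICE);
 *		(writeback so the device reads up-to-date data)
 *
 *	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
 *		(invalidate so the CPU sees what the device wrote)
 */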

/*
 * consistent_sync_page makes a page consistent. It is identical to
 * consistent_sync, but takes a struct page instead of a virtual address.
 */

void consistent_sync_page(struct page *page, unsigned long offset,
			  size_t size, int direction)
{
	void *start;

	start = page_address(page) + offset;
	consistent_sync(start, size, direction);
}
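
/*
 * Illustrative sketch (not part of the original file): when a driver holds a
 * struct page rather than a kernel virtual address, the wrapper above saves
 * the explicit page_address() lookup; 'pg' is a hypothetical page the device
 * has just written into:
 *
 *	consistent_sync_page(pg, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
 */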